repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/modes.rs
sway-core/src/semantic_analysis/ast_node/modes.rs
use crate::{decl_engine::DeclId, language::ty::TyAbiDecl}; #[derive(Clone, PartialEq, Eq, Default, Debug)] pub enum AbiMode { ImplAbiFn(sway_types::Ident, Option<DeclId<TyAbiDecl>>), #[default] NonAbi, } #[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] pub enum ConstShadowingMode { Allow, Sequential, #[default] ItemStyle, } #[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] pub enum GenericShadowingMode { Disallow, #[default] Allow, }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/mode.rs
sway-core/src/semantic_analysis/ast_node/mode.rs
#[derive(Clone, PartialEq, Eq, Default)] pub enum Mode { ImplAbiFn(sway_types::Ident), #[default] NonAbi, }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/code_block.rs
sway-core/src/semantic_analysis/ast_node/code_block.rs
use super::*; use crate::language::{ parsed::CodeBlock, ty::{self, TyAstNodeContent, TyCodeBlock}, }; impl ty::TyCodeBlock { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, code_block: &CodeBlock, ) -> Result<(), ErrorEmitted> { let _ = ctx.scoped( engines, code_block.whole_block_span.clone(), None, |scoped_ctx| { let _ = code_block .contents .iter() .map(|node| ty::TyAstNode::collect(handler, engines, scoped_ctx, node)) .filter_map(|res| res.ok()) .collect::<Vec<_>>(); Ok(()) }, ); Ok(()) } pub(crate) fn type_check( handler: &Handler, mut ctx: TypeCheckContext, code_block: &CodeBlock, is_root: bool, ) -> Result<Self, ErrorEmitted> { if !is_root { let code_block_result = ctx.by_ref() .scoped(handler, Some(code_block.span()), |ctx| { let evaluated_contents = code_block .contents .iter() .filter_map(|node| { ty::TyAstNode::type_check(handler, ctx.by_ref(), node).ok() }) .collect::<Vec<ty::TyAstNode>>(); Ok(ty::TyCodeBlock { contents: evaluated_contents, whole_block_span: code_block.whole_block_span.clone(), }) })?; return Ok(code_block_result); } ctx.engines.te().clear_unifications(); ctx.namespace() .current_module() .current_lexical_scope() .items .clear_symbols_unique_while_collecting_unifications(); // We are typechecking the code block AST nodes twice. // The first pass does all the unifications to the variables types. // In the second pass we use the previous_namespace on variable declaration to unify directly with the result of the first pass. 
// This is required to fix the test case numeric_type_propagation and issue #6371 ctx.by_ref() .with_collecting_unifications() .with_code_block_first_pass(true) .scoped(handler, Some(code_block.span()), |ctx| { code_block.contents.iter().for_each(|node| { ty::TyAstNode::type_check(&Handler::default(), ctx.by_ref(), node).ok(); }); Ok(()) })?; ctx.engines.te().reapply_unifications(ctx.engines(), 0); ctx.by_ref() .scoped(handler, Some(code_block.span()), |ctx| { let evaluated_contents = code_block .contents .iter() .filter_map(|node| ty::TyAstNode::type_check(handler, ctx.by_ref(), node).ok()) .collect::<Vec<ty::TyAstNode>>(); Ok(ty::TyCodeBlock { contents: evaluated_contents, whole_block_span: code_block.whole_block_span.clone(), }) }) } pub fn compute_return_type_and_span( ctx: &TypeCheckContext, code_block: &TyCodeBlock, ) -> (TypeId, Span) { let implicit_return_span = code_block .contents .iter() .find_map(|x| match &x.content { TyAstNodeContent::Expression(ty::TyExpression { expression: ty::TyExpressionVariant::ImplicitReturn(expr), .. }) => Some(Some(expr.span.clone())), _ => None, }) .flatten(); let span = implicit_return_span.unwrap_or_else(|| code_block.whole_block_span.clone()); let block_type = code_block .contents .iter() .find_map(|node| { match node { // If an ast node of the block returns, panics, breaks, or continues then the whole block should have `Never` as return type. ty::TyAstNode { content: ty::TyAstNodeContent::Expression(ty::TyExpression { expression: ty::TyExpressionVariant::Return(_) | ty::TyExpressionVariant::Panic(_) | ty::TyExpressionVariant::Break | ty::TyExpressionVariant::Continue, .. }), .. } => Some(ctx.engines.te().id_of_never()), // Find the implicit return, if any, and use it as the code block's return type. // The fact that there is at most one implicit return is an invariant held by the parser. 
ty::TyAstNode { content: ty::TyAstNodeContent::Expression(ty::TyExpression { expression: ty::TyExpressionVariant::ImplicitReturn(_expr), return_type, .. }), .. } => Some(*return_type), // If an ast node of the block has Never as return type then the whole block should have Never as return type. ty::TyAstNode { content: ty::TyAstNodeContent::Expression(ty::TyExpression { return_type, .. }), .. } => { if matches!(*ctx.engines.te().get(*return_type), TypeInfo::Never) { Some(*return_type) } else { None } } _ => None, } }) .unwrap_or_else(|| ctx.engines.te().id_of_unit()); (block_type, span) } } impl TypeCheckAnalysis for ty::TyCodeBlock { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { for node in self.contents.iter() { node.type_check_analyze(handler, ctx)?; } Ok(()) } } impl TypeCheckFinalization for ty::TyCodeBlock { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { for node in self.contents.iter_mut() { let _ = node.type_check_finalize(handler, ctx); } Ok(()) }) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/struct.rs
sway-core/src/semantic_analysis/ast_node/declaration/struct.rs
use crate::{ decl_engine::parsed_id::ParsedDeclId, language::{parsed::*, ty, CallPath}, semantic_analysis::*, type_system::*, Engines, }; use ast_elements::type_parameter::GenericTypeParameter; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use symbol_collection_context::SymbolCollectionContext; impl ty::TyStructDecl { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl_id: &ParsedDeclId<StructDeclaration>, ) -> Result<(), ErrorEmitted> { let struct_decl = engines.pe().get_struct(decl_id); let decl = Declaration::StructDeclaration(*decl_id); ctx.insert_parsed_symbol(handler, engines, struct_decl.name.clone(), decl.clone())?; // create a namespace for the decl, used to create a scope for generics let _ = ctx.scoped( engines, struct_decl.span.clone(), Some(decl), |_scoped_ctx| Ok(()), ); Ok(()) } pub(crate) fn type_check( handler: &Handler, mut ctx: TypeCheckContext, decl: StructDeclaration, ) -> Result<Self, ErrorEmitted> { let StructDeclaration { name, fields, type_parameters, visibility, span, attributes, .. } = decl; // create a namespace for the decl, used to create a scope for generics ctx.scoped(handler, Some(span.clone()), |ctx| { // Type check the type parameters. 
let new_type_parameters = GenericTypeParameter::type_check_type_params( handler, ctx.by_ref(), type_parameters, None, )?; // type check the fields let mut new_fields = vec![]; let mut encountered_non_indexed_field = false; for field in fields.into_iter() { let ty_field = ty::TyStructField::type_check(handler, ctx.by_ref(), field)?; if ty_field.attributes.indexed().is_some() && attributes.event().is_none() { return Err( handler.emit_err(CompileError::IndexedFieldInNonEventStruct { field_name: ty_field.name.into(), struct_name: name.into(), }), ); } if ty_field.attributes.indexed().is_some() { let abi_size_hint = ctx .engines() .te() .get(ty_field.type_argument.type_id) .abi_encode_size_hint(ctx.engines()); if encountered_non_indexed_field { return Err(handler.emit_err( CompileError::IndexedFieldMustPrecedeNonIndexedField { field_name: ty_field.name.into(), }, )); } if !matches!(abi_size_hint, AbiEncodeSizeHint::Exact(_)) { return Err(handler.emit_err( CompileError::IndexedFieldIsNotFixedSizeABIType { field_name: ty_field.name.into(), }, )); } } else { encountered_non_indexed_field = true; } new_fields.push(ty_field); } let path = CallPath::ident_to_fullpath(name, ctx.namespace()); // create the struct decl let decl = ty::TyStructDecl { call_path: path, generic_parameters: new_type_parameters, fields: new_fields, visibility, span, attributes, }; Ok(decl) }) } } impl ty::TyStructField { pub(crate) fn type_check( handler: &Handler, ctx: TypeCheckContext, field: StructField, ) -> Result<Self, ErrorEmitted> { let type_engine = ctx.engines.te(); let mut type_argument = field.type_argument; type_argument.type_id = ctx .resolve_type( handler, type_argument.type_id, &type_argument.span, EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); let field = ty::TyStructField { visibility: field.visibility, name: field.name, span: field.span, type_argument, attributes: field.attributes, }; Ok(field) } } impl TypeCheckAnalysis for 
ty::TyStructDecl { fn type_check_analyze( &self, _handler: &Handler, _ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { Ok(()) } } impl TypeCheckFinalization for ty::TyStructDecl { fn type_check_finalize( &mut self, _handler: &Handler, _ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { Ok(()) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/variable.rs
sway-core/src/semantic_analysis/ast_node/declaration/variable.rs
use crate::{ decl_engine::parsed_id::ParsedDeclId, language::{ parsed::*, ty::{self, TyExpression, TyVariableDecl}, }, semantic_analysis::*, type_system::*, Engines, }; use namespace::ResolvedDeclaration; use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::Spanned; use symbol_collection_context::SymbolCollectionContext; impl ty::TyVariableDecl { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl_id: &ParsedDeclId<VariableDeclaration>, ) -> Result<(), ErrorEmitted> { let var_decl = engines.pe().get_variable(decl_id); ctx.insert_parsed_symbol( handler, engines, var_decl.name.clone(), Declaration::VariableDeclaration(*decl_id), )?; TyExpression::collect(handler, engines, ctx, &var_decl.body) } pub fn type_check( handler: &Handler, ctx: TypeCheckContext, var_decl: VariableDeclaration, ) -> Result<Self, ErrorEmitted> { let engines = &ctx.engines(); let type_engine = engines.te(); let mut type_ascription = var_decl.type_ascription.clone(); type_ascription.type_id = ctx .resolve_type( handler, type_ascription.type_id, &type_ascription.span, EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); let mut ctx = ctx .with_type_annotation(type_ascription.type_id) .with_help_text( "Variable declaration's type annotation does not match up \ with the assigned expression's type.", ); let result = ty::TyExpression::type_check(handler, ctx.by_ref(), &var_decl.body); let body = result .unwrap_or_else(|err| ty::TyExpression::error(err, var_decl.name.span(), engines)); // Determine the type of the variable going forward. Typically this is the type of the RHS, // but in some cases we need to use the type ascription instead. // TODO: We should not have these special cases. The typecheck expressions should be written // in a way to always use the context provided by the LHS, and use the unified type of LHS // and RHS as the return type of the RHS. 
Remove this special case as a part of the // initiative of improving type inference. let return_type = match (&*type_engine.get(type_ascription.type_id), &*type_engine.get(body.return_type)) { // Integers: We can't rely on the type of the RHS to give us the correct integer // type, so the type of the variable *has* to follow `type_ascription` if // `type_ascription` is a concrete integer type that does not conflict with the type // of `body` (i.e. passes the type checking above). (TypeInfo::UnsignedInteger(_), _) | // Never: If the RHS resolves to Never, then any code following the declaration is // unreachable. If the variable is used later on, then it should be treated as // having the same type as the type ascription. (_, TypeInfo::Never) | // If RHS type check ends up in an error we want to use the // provided type ascription as the variable type. E.g.: // let v: Struct<u8> = Struct<u64> { x: 0 }; // `v` should be "Struct<u8>". // let v: ExistingType = non_existing_identifier; // `v` should be "ExistingType". // let v = <some error>; // `v` will remain "{unknown}". // TODO: Refine and improve this further. E.g., // let v: Struct { /* MISSING FIELDS */ }; // Despite the error, `v` should be of type "Struct". (_, TypeInfo::ErrorRecovery(_)) => type_ascription.type_id, // In all other cases we use the type of the RHS. 
_ => body.return_type, }; if !ctx.code_block_first_pass() { let previous_symbol = ctx .namespace() .current_module() .current_items() .check_symbols_unique_while_collecting_unifications(&var_decl.name.clone()) .ok(); if let Some(ResolvedDeclaration::Typed(ty::TyDecl::VariableDecl(variable_decl))) = previous_symbol { type_engine.unify( handler, engines, body.return_type, variable_decl.body.return_type, &variable_decl.span(), "", || None, ); } } let typed_var_decl = ty::TyVariableDecl { name: var_decl.name.clone(), body, mutability: ty::VariableMutability::new_from_ref_mut(false, var_decl.is_mutable), return_type, type_ascription, }; Ok(typed_var_decl) } } impl TypeCheckAnalysis for TyVariableDecl { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { self.body.type_check_analyze(handler, ctx)?; Ok(()) } } impl TypeCheckFinalization for TyVariableDecl { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { self.body.type_check_finalize(handler, ctx) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/declaration.rs
sway-core/src/semantic_analysis/ast_node/declaration/declaration.rs
use ast_elements::type_argument::GenericTypeArgument; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::{Ident, Named, Spanned}; use crate::{ decl_engine::{DeclEngineGet, DeclEngineInsert, DeclRef, ReplaceFunctionImplementingType}, language::{ parsed::{self, StorageEntry}, ty::{ self, FunctionDecl, TyAbiDecl, TyConfigurableDecl, TyConstantDecl, TyDecl, TyEnumDecl, TyFunctionDecl, TyImplSelfOrTrait, TyStorageDecl, TyStorageField, TyStructDecl, TyTraitDecl, TyTraitFn, TyTraitType, TyTypeAliasDecl, TyVariableDecl, }, CallPath, }, namespace::{IsExtendingExistingImpl, IsImplInterfaceSurface, IsImplSelf, Items}, semantic_analysis::{ symbol_collection_context::SymbolCollectionContext, ConstShadowingMode, GenericShadowingMode, TypeCheckAnalysis, TypeCheckAnalysisContext, TypeCheckContext, TypeCheckFinalization, TypeCheckFinalizationContext, }, type_system::*, Engines, }; impl TyDecl { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl: parsed::Declaration, ) -> Result<(), ErrorEmitted> { match &decl { parsed::Declaration::VariableDeclaration(decl_id) => { TyVariableDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::ConstantDeclaration(decl_id) => { TyConstantDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::ConfigurableDeclaration(decl_id) => { TyConfigurableDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::TraitTypeDeclaration(decl_id) => { TyTraitType::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::TraitFnDeclaration(decl_id) => { TyTraitFn::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::EnumDeclaration(decl_id) => { TyEnumDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::EnumVariantDeclaration(_decl) => {} parsed::Declaration::FunctionDeclaration(decl_id) => { TyFunctionDecl::collect(handler, engines, ctx, decl_id)? 
} parsed::Declaration::TraitDeclaration(decl_id) => { TyTraitDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::ImplSelfOrTrait(decl_id) => { TyImplSelfOrTrait::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::StructDeclaration(decl_id) => { TyStructDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::AbiDeclaration(decl_id) => { TyAbiDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::StorageDeclaration(decl_id) => { TyStorageDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::TypeAliasDeclaration(decl_id) => { TyTypeAliasDecl::collect(handler, engines, ctx, decl_id)? } parsed::Declaration::ConstGenericDeclaration(_) => { return Err(handler.emit_err(CompileError::Internal( "Unexpected error on const generics", decl.span(engines), ))); } }; Ok(()) } pub(crate) fn type_check( handler: &Handler, ctx: &mut TypeCheckContext, decl: parsed::Declaration, ) -> Result<ty::TyDecl, ErrorEmitted> { let type_engine = ctx.engines.te(); let decl_engine = ctx.engines.de(); let engines = ctx.engines(); let decl = match decl { parsed::Declaration::VariableDeclaration(decl_id) => { let decl = engines.pe().get_variable(&decl_id).as_ref().clone(); let name = decl.name.clone(); let span = decl.name.span(); let var_decl = match ty::TyVariableDecl::type_check(handler, ctx.by_ref(), decl) { Ok(res) => res, Err(err) => return Ok(ty::TyDecl::ErrorRecovery(span, err)), }; let typed_var_decl = ty::TyDecl::VariableDecl(Box::new(var_decl)); ctx.insert_symbol(handler, name, typed_var_decl.clone())?; typed_var_decl } parsed::Declaration::ConstantDeclaration(decl_id) => { let decl = engines.pe().get_constant(&decl_id).as_ref().clone(); let span = decl.span.clone(); let const_decl = match ty::TyConstantDecl::type_check(handler, ctx.by_ref(), decl) { Ok(res) => res, Err(err) => return Ok(ty::TyDecl::ErrorRecovery(span, err)), }; let typed_const_decl: ty::TyDecl = decl_engine .insert(const_decl.clone(), 
Some(&decl_id)) .into(); ctx.insert_symbol(handler, const_decl.name().clone(), typed_const_decl.clone())?; typed_const_decl } parsed::Declaration::ConfigurableDeclaration(decl_id) => { let decl = engines.pe().get_configurable(&decl_id).as_ref().clone(); let span = decl.span.clone(); let name = decl.name.clone(); let typed_const_decl = match ty::TyConfigurableDecl::type_check(handler, ctx.by_ref(), decl) { Ok(config_decl) => { config_decl.forbid_const_generics(handler, engines)?; ty::TyDecl::from(decl_engine.insert(config_decl, Some(&decl_id))) } Err(err) => ty::TyDecl::ErrorRecovery(span, err), }; ctx.insert_symbol(handler, name, typed_const_decl.clone())?; typed_const_decl } parsed::Declaration::TraitTypeDeclaration(decl_id) => { let decl = engines.pe().get_trait_type(&decl_id).as_ref().clone(); let span = decl.span.clone(); let type_decl = match ty::TyTraitType::type_check(handler, ctx.by_ref(), decl) { Ok(res) => res, Err(err) => return Ok(ty::TyDecl::ErrorRecovery(span, err)), }; let typed_type_decl: ty::TyDecl = decl_engine.insert(type_decl.clone(), Some(&decl_id)).into(); ctx.insert_symbol(handler, type_decl.name().clone(), typed_type_decl.clone())?; typed_type_decl } parsed::Declaration::EnumDeclaration(decl_id) => { let decl = engines.pe().get_enum(&decl_id).as_ref().clone(); let span = decl.span.clone(); let enum_decl = match ty::TyEnumDecl::type_check(handler, ctx.by_ref(), decl) { Ok(res) => res, Err(err) => return Ok(ty::TyDecl::ErrorRecovery(span, err)), }; let call_path = enum_decl.call_path.clone(); let decl: ty::TyDecl = decl_engine.insert(enum_decl, Some(&decl_id)).into(); ctx.insert_symbol(handler, call_path.suffix, decl.clone())?; decl } parsed::Declaration::EnumVariantDeclaration(_decl) => { // Type-checked above as part of the containing enum. 
unreachable!() } parsed::Declaration::FunctionDeclaration(decl_id) => { let fn_decl = engines.pe().get_function(&decl_id); let span = fn_decl.span.clone(); let mut ctx = ctx.by_ref().with_type_annotation(type_engine.new_unknown()); let fn_decl = match ty::TyFunctionDecl::type_check( handler, ctx.by_ref(), &fn_decl, false, false, None, ) { Ok(res) => res, Err(err) => return Ok(ty::TyDecl::ErrorRecovery(span, err)), }; let name = fn_decl.name.clone(); let decl: ty::TyDecl = decl_engine.insert(fn_decl, Some(&decl_id)).into(); let _ = ctx.insert_symbol(handler, name, decl.clone()); decl } parsed::Declaration::TraitDeclaration(decl_id) => { let trait_decl = engines.pe().get_trait(&decl_id).as_ref().clone(); let span = trait_decl.span.clone(); let mut trait_decl = match ty::TyTraitDecl::type_check(handler, ctx.by_ref(), trait_decl) { Ok(res) => res, Err(err) => return Ok(ty::TyDecl::ErrorRecovery(span, err)), }; let name = trait_decl.name.clone(); // save decl_refs for the LSP for supertrait in trait_decl.supertraits.iter_mut() { let _ = ctx.resolve_call_path(handler, &supertrait.name) .map(|supertrait_decl| { if let ty::TyDecl::TraitDecl(ty::TraitDecl { decl_id: supertrait_decl_id, }) = supertrait_decl { supertrait.decl_ref = Some(DeclRef::new( engines.de().get(&supertrait_decl_id).name.clone(), supertrait_decl_id, engines.de().get(&supertrait_decl_id).span.clone(), )); } }); } let decl: ty::TyDecl = decl_engine .insert(trait_decl.clone(), Some(&decl_id)) .into(); trait_decl .items .iter_mut() .for_each(|item| item.replace_implementing_type(engines, decl.clone())); ctx.insert_symbol(handler, name, decl.clone())?; decl } parsed::Declaration::ImplSelfOrTrait(decl_id) => { let impl_self_or_trait = engines .pe() .get_impl_self_or_trait(&decl_id) .as_ref() .clone(); let span = impl_self_or_trait.block_span.clone(); let mut impl_trait = if impl_self_or_trait.is_self { let impl_trait_decl = match ty::TyImplSelfOrTrait::type_check_impl_self( handler, ctx.by_ref(), &decl_id, 
impl_self_or_trait, ) { Ok(val) => val, Err(err) => return Ok(ty::TyDecl::ErrorRecovery(span, err)), }; let impl_trait = if let TyDecl::ImplSelfOrTrait(impl_trait_id) = &impl_trait_decl { decl_engine.get_impl_self_or_trait(&impl_trait_id.decl_id) } else { unreachable!(); }; ctx.insert_trait_implementation( handler, impl_trait.trait_name.clone(), impl_trait.trait_type_arguments.clone(), impl_trait.implementing_for.type_id, impl_trait.impl_type_parameters.clone(), &impl_trait.items, &impl_trait.span, impl_trait .trait_decl_ref .as_ref() .map(|decl_ref| decl_ref.decl_span().clone()), IsImplSelf::Yes, IsExtendingExistingImpl::No, IsImplInterfaceSurface::No, )?; return Ok(impl_trait_decl); } else { match ty::TyImplSelfOrTrait::type_check_impl_trait( handler, ctx.by_ref(), impl_self_or_trait, ) { Ok(res) => res, Err(err) => return Ok(ty::TyDecl::ErrorRecovery(span, err)), } }; // Insert prefixed symbols when implementing_for is Contract let is_contract = engines .te() .get(impl_trait.implementing_for.type_id) .is_contract(); if is_contract { for i in &impl_trait.items { if let ty::TyTraitItem::Fn(f) = i { let decl = engines.de().get(f.id()); let collecting_unifications = ctx.collecting_unifications(); let _ = ctx.namespace.current_module_mut().write(engines, |m| { Items::insert_typed_symbol( handler, engines, m, Ident::new_no_span(format!( "__contract_entry_{}", decl.name.clone() )), TyDecl::FunctionDecl(FunctionDecl { decl_id: *f.id() }), ConstShadowingMode::ItemStyle, GenericShadowingMode::Allow, collecting_unifications, ) }); } } } // Choose which items are going to be visible depending if this is an abi impl // or trait impl let t = ctx.resolve_call_path(&Handler::default(), &impl_trait.trait_name); let empty_vec = vec![]; let impl_trait_items = if let Ok(ty::TyDecl::TraitDecl { .. 
}) = t { &impl_trait.items } else { &empty_vec }; ctx.insert_trait_implementation( handler, impl_trait.trait_name.clone(), impl_trait.trait_type_arguments.clone(), impl_trait.implementing_for.type_id, impl_trait.impl_type_parameters.clone(), impl_trait_items, &impl_trait.span, impl_trait .trait_decl_ref .as_ref() .map(|decl_ref| decl_ref.decl_span().clone()), IsImplSelf::No, IsExtendingExistingImpl::No, IsImplInterfaceSurface::No, )?; let impl_trait_decl: ty::TyDecl = decl_engine .insert(impl_trait.clone(), Some(&decl_id)) .into(); impl_trait.items.iter_mut().for_each(|item| { item.replace_implementing_type(engines, impl_trait_decl.clone()); }); impl_trait_decl } parsed::Declaration::StructDeclaration(decl_id) => { let decl = engines.pe().get_struct(&decl_id).as_ref().clone(); let span = decl.span.clone(); let decl: ty::TyStructDecl = match ty::TyStructDecl::type_check(handler, ctx.by_ref(), decl) { Ok(res) => res, Err(err) => { return Ok(ty::TyDecl::ErrorRecovery(span, err)); } }; let call_path = decl.call_path.clone(); let decl: ty::TyDecl = decl_engine.insert(decl, Some(&decl_id)).into(); // insert the struct decl into namespace ctx.insert_symbol(handler, call_path.suffix, decl.clone())?; decl } parsed::Declaration::AbiDeclaration(decl_id) => { let abi_decl = engines.pe().get_abi(&decl_id).as_ref().clone(); let span = abi_decl.span.clone(); let mut abi_decl = match ty::TyAbiDecl::type_check(handler, ctx.by_ref(), abi_decl) { Ok(res) => res, Err(err) => { return Ok(ty::TyDecl::ErrorRecovery(span, err)); } }; let name = abi_decl.name.clone(); // save decl_refs for the LSP for supertrait in abi_decl.supertraits.iter_mut() { let _ = ctx.resolve_call_path(handler, &supertrait.name) .map(|supertrait_decl| { if let ty::TyDecl::TraitDecl(ty::TraitDecl { decl_id: supertrait_decl_id, }) = supertrait_decl { supertrait.decl_ref = Some(DeclRef::new( engines.de().get(&supertrait_decl_id).name.clone(), supertrait_decl_id, engines.de().get(&supertrait_decl_id).span.clone(), )); 
} }); } let decl: ty::TyDecl = decl_engine.insert(abi_decl.clone(), Some(&decl_id)).into(); abi_decl .items .iter_mut() .for_each(|item| item.replace_implementing_type(engines, decl.clone())); ctx.insert_symbol(handler, name, decl.clone())?; decl } parsed::Declaration::StorageDeclaration(decl_id) => { let parsed::StorageDeclaration { span, entries, attributes, storage_keyword, } = engines.pe().get_storage(&decl_id).as_ref().clone(); let mut fields_buf = vec![]; fn type_check_storage_entries( handler: &Handler, mut ctx: TypeCheckContext, entries: Vec<StorageEntry>, fields_buf: &mut Vec<TyStorageField>, namespace_names: Vec<Ident>, ) -> Result<(), ErrorEmitted> { let engines = ctx.engines; for entry in entries { if let StorageEntry::Field(parsed::StorageField { name, key_expression, initializer, mut type_argument, attributes, span: field_span, .. }) = entry { type_argument.type_id = ctx.by_ref().resolve_type( handler, type_argument.type_id, &name.span(), EnforceTypeArguments::Yes, None, )?; let mut ctx = ctx .by_ref() .with_type_annotation(type_argument.type_id) .with_storage_declaration(); let initializer = ty::TyExpression::type_check(handler, ctx.by_ref(), &initializer)?; let key_expression = match key_expression { Some(key_expression) => { let key_ctx = ctx .with_type_annotation(engines.te().id_of_b256()) .with_help_text("Storage keys must have type \"b256\"."); // TODO: Remove the `handler.scope` once https://github.com/FuelLabs/sway/issues/5606 gets solved. // We need it here so that we can short-circuit in case of a `TypeMismatch` error which is // not treated as an error in the `type_check()`'s result. 
let typed_expr = handler.scope(|handler| { ty::TyExpression::type_check( handler, key_ctx, &key_expression, ) })?; Some(typed_expr) } None => None, }; fields_buf.push(ty::TyStorageField { name, namespace_names: namespace_names.clone(), key_expression, type_argument, initializer, span: field_span, attributes, }); } else if let StorageEntry::Namespace(namespace) = entry { let mut new_namespace_names = namespace_names.clone(); new_namespace_names.push(namespace.name); type_check_storage_entries( handler, ctx.by_ref(), namespace .entries .iter() .map(|e| (**e).clone()) .collect::<Vec<_>>(), fields_buf, new_namespace_names, )?; } } Ok(()) } type_check_storage_entries( handler, ctx.by_ref(), entries, &mut fields_buf, vec![], )?; let decl = ty::TyStorageDecl { fields: fields_buf, span, attributes, storage_keyword, }; let decl_ref = decl_engine.insert(decl, Some(&decl_id)); // insert the storage declaration into the symbols // if there already was one, return an error that duplicate storage // declarations are not allowed ctx.namespace_mut() .current_module_mut() .write(engines, |m| { m.current_items_mut() .set_storage_declaration(handler, decl_ref.clone()) })?; decl_ref.into() } parsed::Declaration::TypeAliasDeclaration(decl_id) => { let decl = engines.pe().get_type_alias(&decl_id); let span = decl.name.span(); let name = decl.name.clone(); let ty = &decl.ty; // Resolve the type that the type alias replaces let new_ty = ctx .resolve_type(handler, ty.type_id, &span, EnforceTypeArguments::Yes, None) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); // create the type alias decl using the resolved type above let decl = ty::TyTypeAliasDecl { name: name.clone(), call_path: CallPath::from(name.clone()).to_fullpath(engines, ctx.namespace()), attributes: decl.attributes.clone(), ty: GenericTypeArgument { initial_type_id: ty.initial_type_id, type_id: new_ty, call_path_tree: ty.call_path_tree.as_ref().cloned(), span: ty.span.clone(), }, visibility: decl.visibility, 
span, }; let decl: ty::TyDecl = decl_engine.insert(decl, Some(&decl_id)).into(); // insert the type alias name and decl into namespace ctx.insert_symbol(handler, name, decl.clone())?; decl } parsed::Declaration::TraitFnDeclaration(_decl_id) => { unreachable!(); } parsed::Declaration::ConstGenericDeclaration(_) => { unreachable!("ConstGenericDecl is not reachable from AstNode") } }; Ok(decl) } } impl TypeCheckAnalysis for TyDecl { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { match self { TyDecl::VariableDecl(var_decl) => { var_decl.type_check_analyze(handler, ctx)?; } TyDecl::ConstantDecl(node) => { let const_decl = ctx.engines.de().get_constant(&node.decl_id); const_decl.type_check_analyze(handler, ctx)?; } TyDecl::ConfigurableDecl(node) => { let const_decl = ctx.engines.de().get_configurable(&node.decl_id); const_decl.type_check_analyze(handler, ctx)?; } TyDecl::ConstGenericDecl(_) => { unreachable!("ConstGenericDecl is not reachable from AstNode") } TyDecl::FunctionDecl(node) => { let fn_decl = ctx.engines.de().get_function(&node.decl_id); fn_decl.type_check_analyze(handler, ctx)?; } TyDecl::TraitDecl(node) => { let trait_decl = ctx.engines.de().get_trait(&node.decl_id); trait_decl.type_check_analyze(handler, ctx)?; } TyDecl::StructDecl(node) => { let struct_decl = ctx.engines.de().get_struct(&node.decl_id); struct_decl.type_check_analyze(handler, ctx)?; } TyDecl::EnumDecl(node) => { let enum_decl = ctx.engines.de().get_enum(&node.decl_id); enum_decl.type_check_analyze(handler, ctx)?; } TyDecl::EnumVariantDecl(_) => {} TyDecl::ImplSelfOrTrait(node) => { node.type_check_analyze(handler, ctx)?; } TyDecl::AbiDecl(node) => { let abi_decl = ctx.engines.de().get_abi(&node.decl_id); abi_decl.type_check_analyze(handler, ctx)?; } TyDecl::GenericTypeForFunctionScope(_) => {} TyDecl::ErrorRecovery(_, _) => {} TyDecl::StorageDecl(node) => { let storage_decl = ctx.engines.de().get_storage(&node.decl_id); 
storage_decl.type_check_analyze(handler, ctx)?;
            }
            // Type aliases and trait types carry no nested nodes that need analysis.
            TyDecl::TypeAliasDecl(_) => {}
            TyDecl::TraitTypeDecl(_) => {}
        }
        Ok(())
    }
}

// Final pass over a typed declaration, run after type checking and analysis.
// Dispatches on the declaration kind and delegates to that kind's own
// `type_check_finalize` implementation.
impl TypeCheckFinalization for TyDecl {
    fn type_check_finalize(
        &mut self,
        handler: &Handler,
        ctx: &mut TypeCheckFinalizationContext,
    ) -> Result<(), ErrorEmitted> {
        let decl_engine = ctx.engines.de();
        match self {
            TyDecl::VariableDecl(node) => {
                node.type_check_finalize(handler, ctx)?;
            }
            // For engine-backed declarations, the declaration is cloned out of the
            // decl engine and the clone is finalized.
            // NOTE(review): the finalized clone is never re-inserted into the decl
            // engine in this match -- presumably finalization only emits diagnostics
            // or mutates state reachable through `ctx`; confirm before relying on it.
            TyDecl::ConstantDecl(node) => {
                let mut const_decl = (*ctx.engines.de().get_constant(&node.decl_id)).clone();
                const_decl.type_check_finalize(handler, ctx)?;
            }
            TyDecl::ConfigurableDecl(node) => {
                let mut config_decl = (*ctx.engines.de().get_configurable(&node.decl_id)).clone();
                config_decl.type_check_finalize(handler, ctx)?;
            }
            // Const generics never appear as stand-alone AST nodes, so this arm
            // cannot be reached from `TyAstNode` finalization.
            TyDecl::ConstGenericDecl(_) => {
                unreachable!("ConstGenericDecl is not reachable from AstNode")
            }
            TyDecl::FunctionDecl(node) => {
                let mut fn_decl = (*ctx.engines.de().get_function(&node.decl_id)).clone();
                fn_decl.type_check_finalize(handler, ctx)?;
            }
            TyDecl::TraitDecl(node) => {
                let mut trait_decl = (*ctx.engines.de().get_trait(&node.decl_id)).clone();
                trait_decl.type_check_finalize(handler, ctx)?;
            }
            TyDecl::StructDecl(node) => {
                let mut struct_decl = (*ctx.engines.de().get_struct(&node.decl_id)).clone();
                struct_decl.type_check_finalize(handler, ctx)?;
            }
            TyDecl::EnumDecl(node) => {
                let mut enum_decl = (*ctx.engines.de().get_enum(&node.decl_id)).clone();
                enum_decl.type_check_finalize(handler, ctx)?;
            }
            // Enum variants are finalized as part of their enclosing enum.
            TyDecl::EnumVariantDecl(_) => {}
            TyDecl::ImplSelfOrTrait(node) => {
                let mut impl_trait = (*decl_engine.get_impl_self_or_trait(&node.decl_id)).clone();
                impl_trait.type_check_finalize(handler, ctx)?;
            }
            TyDecl::AbiDecl(node) => {
                let mut abi_decl = (*decl_engine.get_abi(&node.decl_id)).clone();
                abi_decl.type_check_finalize(handler, ctx)?;
            }
            // Nothing to finalize for scope-local generics or already-reported errors.
            TyDecl::GenericTypeForFunctionScope(_) => {}
            TyDecl::ErrorRecovery(_, _) => {}
            TyDecl::StorageDecl(node) => {
                let mut storage_decl = (*decl_engine.get_storage(&node.decl_id)).clone();
                storage_decl.type_check_finalize(handler, ctx)?;
            }
            TyDecl::TypeAliasDecl(node) => {
                let mut type_alias_decl = (*decl_engine.get_type_alias(&node.decl_id)).clone();
                type_alias_decl.type_check_finalize(handler, ctx)?;
            }
            // Trait type declarations carry no nested items to finalize.
            TyDecl::TraitTypeDecl(_node) => {}
        }
        Ok(())
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/supertrait.rs
sway-core/src/semantic_analysis/ast_node/declaration/supertrait.rs
use std::collections::BTreeMap;

use sway_error::error::CompileError;
use sway_error::handler::{ErrorEmitted, Handler};
use sway_types::{Span, Spanned};

use crate::EnforceTypeArguments;
use crate::{
    language::{parsed, ty},
    semantic_analysis::TypeCheckContext,
    TypeId,
};

/// Distinguishes what kind of declaration the supertraits being processed
/// belong to: a trait, or an ABI (super-ABIs). This changes which supertrait
/// combinations are allowed (e.g. an ABI can only be a super-ABI of an ABI).
#[derive(Clone, PartialEq, Eq)]
pub enum SupertraitOf {
    Abi(Span), // Span of the sub-ABI declaration; needed for error reporting
    Trait,
}

/// Recursively insert the interface surfaces and methods from supertraits to
/// the given namespace.
///
/// `type_id` is the type the sub-trait / sub-ABI applies to, `supertraits` are
/// the parsed supertrait constraints to process, and `supertraits_of` states
/// whether those constraints appear on a trait or on an ABI declaration.
///
/// Errors are accumulated in the `handler` scope; each failing supertrait
/// `continue`s to the next one instead of aborting the whole loop.
pub(crate) fn insert_supertraits_into_namespace(
    handler: &Handler,
    mut ctx: TypeCheckContext,
    type_id: TypeId,
    supertraits: &[parsed::Supertrait],
    supertraits_of: &SupertraitOf,
) -> Result<(), ErrorEmitted> {
    let engines = ctx.engines;
    let decl_engine = engines.de();

    handler.scope(|handler| {
        for supertrait in supertraits.iter() {
            // Right now we don't have the ability to support defining a supertrait
            // using a callpath directly, so we check to see if the user has done
            // this and we disallow it.
            if !supertrait.name.prefixes.is_empty() {
                handler.emit_err(CompileError::Unimplemented {
                    feature: "Using module paths to define supertraits".to_string(),
                    help: vec![
                        // Note that eventual leading `::` will not be shown. It's fine for now, we anyhow want to implement using module paths.
                        format!(
                            "Import the supertrait by using: `use {};`.",
                            supertrait.name
                        ),
                        format!(
                            "Then, in the list of supertraits, just use the trait name \"{}\".",
                            supertrait.name.suffix
                        ),
                    ],
                    span: supertrait.span(),
                });
                continue;
            }

            let decl = ctx
                // Use the default Handler to avoid emitting the redundant SymbolNotFound error.
                .resolve_call_path(&Handler::default(), &supertrait.name)
                .ok();

            // NOTE(review): `decl` is cloned into the match although it is not
            // used again afterwards -- the clone looks avoidable; left as-is.
            match (decl.clone(), supertraits_of) {
                // a trait can be a supertrait of either a trait or an ABI
                (Some(ty::TyDecl::TraitDecl(ty::TraitDecl {
                    decl_id, ..
                })), _) => {
                    let mut trait_decl = (*decl_engine.get_trait(&decl_id)).clone();

                    // Right now we don't parse type arguments for supertraits, so
                    // we should give this error message to users.
                    if !trait_decl.type_parameters.is_empty() {
                        handler.emit_err(CompileError::Unimplemented {
                            feature: "Using generic traits as supertraits".to_string(),
                            help: vec![],
                            span: supertrait.span(),
                        });
                        continue;
                    }

                    // TODO: right now supertraits can't take type arguments
                    let mut type_arguments = vec![];

                    // Monomorphize the trait declaration.
                    if ctx
                        .monomorphize(
                            handler,
                            &mut trait_decl,
                            &mut type_arguments,
                            BTreeMap::new(),
                            EnforceTypeArguments::Yes,
                            &supertrait.name.span(),
                        )
                        .is_err()
                    {
                        continue;
                    }

                    // Insert the interface surface and methods from this trait into
                    // the namespace.
                    trait_decl.insert_interface_surface_and_items_into_namespace(
                        handler,
                        ctx.by_ref(),
                        &supertrait.name,
                        &type_arguments,
                        type_id,
                    );

                    // Recurse to insert versions of interfaces and methods of the
                    // *super* supertraits.
                    if insert_supertraits_into_namespace(
                        handler,
                        ctx.by_ref(),
                        type_id,
                        &trait_decl.supertraits,
                        &SupertraitOf::Trait,
                    )
                    .is_err()
                    {
                        continue;
                    }
                }
                // an ABI can only be a superABI of an ABI
                (
                    Some(ty::TyDecl::AbiDecl(ty::AbiDecl {
                        decl_id, ..
                    })),
                    SupertraitOf::Abi(subabi_span),
                ) => {
                    let abi_decl = decl_engine.get_abi(&decl_id);

                    // Insert the interface surface and methods from this ABI into
                    // the namespace.
                    if abi_decl
                        .insert_interface_surface_and_items_into_namespace(
                            handler,
                            decl_id,
                            ctx.by_ref(),
                            type_id,
                            Some(subabi_span.clone()),
                        )
                        .is_err()
                    {
                        continue;
                    }

                    // Recurse to insert versions of interfaces and methods of the
                    // *super* superABIs.
                    if insert_supertraits_into_namespace(
                        handler,
                        ctx.by_ref(),
                        type_id,
                        &abi_decl.supertraits,
                        &SupertraitOf::Abi(subabi_span.clone()),
                    )
                    .is_err()
                    {
                        continue;
                    }
                }
                // an ABI cannot be a supertrait of a trait
                (Some(ty::TyDecl::AbiDecl {
                    ..
                }), SupertraitOf::Trait) => {
                    handler.emit_err(CompileError::AbiAsSupertrait {
                        span: supertrait.name.span().clone(),
                    });
                }
                // Anything else (not found, or a non-trait/non-ABI declaration)
                // is reported as a missing trait.
                _ => {
                    handler.emit_err(CompileError::TraitNotFound {
                        name: supertrait.name.to_string(),
                        span: supertrait.name.span(),
                    });
                }
            }
        }

        Ok(())
    })
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/trait.rs
sway-core/src/semantic_analysis/ast_node/declaration/trait.rs
use std::collections::{BTreeMap, HashMap, HashSet}; use ast_elements::type_parameter::GenericTypeParameter; use parsed_id::ParsedDeclId; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, warning::{CompileWarning, Warning}, }; use sway_types::{style::is_upper_camel_case, Ident, Named, Spanned}; use crate::{ decl_engine::*, language::{ parsed::*, ty::{ self, TyConstantDecl, TyFunctionDecl, TyImplItem, TyTraitDecl, TyTraitFn, TyTraitItem, TyTraitType, }, CallPath, }, namespace::{IsExtendingExistingImpl, IsImplInterfaceSurface, IsImplSelf}, semantic_analysis::{ declaration::{insert_supertraits_into_namespace, SupertraitOf}, symbol_collection_context::SymbolCollectionContext, AbiMode, TypeCheckAnalysis, TypeCheckAnalysisContext, TypeCheckContext, TypeCheckFinalization, TypeCheckFinalizationContext, }, type_system::*, Engines, }; impl TyTraitItem { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, item: &TraitItem, ) -> Result<(), ErrorEmitted> { match item { TraitItem::TraitFn(decl_id) => TyTraitFn::collect(handler, engines, ctx, decl_id), TraitItem::Constant(decl_id) => TyConstantDecl::collect(handler, engines, ctx, decl_id), TraitItem::Type(decl_id) => TyTraitType::collect(handler, engines, ctx, decl_id), TraitItem::Error(_, _) => Ok(()), } } } impl TyTraitDecl { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl_id: &ParsedDeclId<TraitDeclaration>, ) -> Result<(), ErrorEmitted> { let trait_decl = engines.pe().get_trait(decl_id); let decl = Declaration::TraitDeclaration(*decl_id); ctx.insert_parsed_symbol(handler, engines, trait_decl.name.clone(), decl.clone())?; // A temporary namespace for checking within the trait's scope. 
let _ = ctx.scoped(engines, trait_decl.span.clone(), Some(decl), |scoped_ctx| { trait_decl.interface_surface.iter().for_each(|item| { let _ = TyTraitItem::collect(handler, engines, scoped_ctx, item); }); trait_decl.methods.iter().for_each(|decl_id| { let _ = TyFunctionDecl::collect(handler, engines, scoped_ctx, decl_id); }); Ok(()) }); Ok(()) } pub(crate) fn type_check( handler: &Handler, ctx: TypeCheckContext, trait_decl: TraitDeclaration, ) -> Result<Self, ErrorEmitted> { let TraitDeclaration { name, type_parameters, attributes, interface_surface, methods, supertraits, visibility, span, } = trait_decl; if !is_upper_camel_case(name.as_str()) { handler.emit_warn(CompileWarning { span: name.span(), warning_content: Warning::NonClassCaseTraitName { name: name.clone() }, }); } let decl_engine = ctx.engines.de(); let engines = ctx.engines(); // Create a new type parameter for the self type. // The span of the `trait_decl` `name` points to the file (use site) in which // the trait is getting declared, so we can use it as the `use_site_span`. let self_type_param = GenericTypeParameter::new_self_type(engines, name.span()); let self_type = self_type_param.type_id; // A temporary namespace for checking within the trait's scope. ctx.with_self_type(Some(self_type)) .scoped(handler, Some(span.clone()), |ctx| { // Type check the type parameters. let new_type_parameters = GenericTypeParameter::type_check_type_params( handler, ctx.by_ref(), type_parameters, Some(self_type_param.clone()), )?; // Recursively make the interface surfaces and methods of the // supertraits available to this trait. 
insert_supertraits_into_namespace( handler, ctx.by_ref(), self_type, &supertraits, &SupertraitOf::Trait, )?; // type check the interface surface let mut new_interface_surface = vec![]; let mut dummy_interface_surface = vec![]; let mut ids: HashSet<Ident> = HashSet::default(); for item in interface_surface.clone().into_iter() { let decl_name = match item { TraitItem::TraitFn(_) => None, TraitItem::Constant(_) => None, TraitItem::Type(decl_id) => { let type_decl = engines.pe().get_trait_type(&decl_id).as_ref().clone(); let type_decl = ty::TyTraitType::type_check( handler, ctx.by_ref(), type_decl.clone(), )?; let decl_ref = decl_engine.insert(type_decl.clone(), Some(&decl_id)); dummy_interface_surface.push(ty::TyImplItem::Type(decl_ref.clone())); new_interface_surface .push(ty::TyTraitInterfaceItem::Type(decl_ref.clone())); Some(type_decl.name) } TraitItem::Error(_, _) => None, }; if let Some(decl_name) = decl_name { if !ids.insert(decl_name.clone()) { handler.emit_err(CompileError::MultipleDefinitionsOfName { name: decl_name.clone(), span: decl_name.span(), }); } } } // insert placeholder functions representing the interface surface // to allow methods to use those functions ctx.insert_trait_implementation( handler, CallPath::ident_to_fullpath(name.clone(), ctx.namespace), new_type_parameters.iter().map(|x| x.into()).collect(), self_type, vec![], &dummy_interface_surface, &span, None, IsImplSelf::No, IsExtendingExistingImpl::No, IsImplInterfaceSurface::No, )?; let mut dummy_interface_surface = vec![]; for item in interface_surface.into_iter() { let decl_name = match item { TraitItem::TraitFn(decl_id) => { let method = engines.pe().get_trait_fn(&decl_id); let method = ty::TyTraitFn::type_check(handler, ctx.by_ref(), &method)?; let decl_ref = decl_engine.insert(method.clone(), Some(&decl_id)); dummy_interface_surface.push(ty::TyImplItem::Fn( decl_engine .insert( method.to_dummy_func(AbiMode::NonAbi, Some(self_type)), None, ) .with_parent(decl_engine, 
(*decl_ref.id()).into()), )); new_interface_surface.push(ty::TyTraitInterfaceItem::TraitFn(decl_ref)); Some(method.name.clone()) } TraitItem::Constant(decl_id) => { let const_decl = engines.pe().get_constant(&decl_id).as_ref().clone(); let const_decl = ty::TyConstantDecl::type_check(handler, ctx.by_ref(), const_decl)?; let decl_ref = ctx.engines.de().insert(const_decl.clone(), Some(&decl_id)); new_interface_surface .push(ty::TyTraitInterfaceItem::Constant(decl_ref.clone())); let const_name = const_decl.call_path.suffix.clone(); ctx.insert_symbol( handler, const_name.clone(), ty::TyDecl::ConstantDecl(ty::ConstantDecl { decl_id: *decl_ref.id(), }), )?; Some(const_name) } TraitItem::Type(_) => None, TraitItem::Error(_, _) => { continue; } }; if let Some(decl_name) = decl_name { if !ids.insert(decl_name.clone()) { handler.emit_err(CompileError::MultipleDefinitionsOfName { name: decl_name.clone(), span: decl_name.span(), }); } } } // insert placeholder functions representing the interface surface // to allow methods to use those functions ctx.insert_trait_implementation( handler, CallPath::ident_to_fullpath(name.clone(), ctx.namespace()), new_type_parameters.iter().map(|x| x.into()).collect(), self_type, vec![], &dummy_interface_surface, &span, None, IsImplSelf::No, IsExtendingExistingImpl::Yes, IsImplInterfaceSurface::No, )?; // Type check the items. 
let mut new_items = vec![]; for method_decl_id in methods.into_iter() { let method = engines.pe().get_function(&method_decl_id); let method = ty::TyFunctionDecl::type_check( handler, ctx.by_ref(), &method, true, false, Some(self_type_param.type_id), ) .unwrap_or_else(|_| ty::TyFunctionDecl::error(&method)); new_items.push(ty::TyTraitItem::Fn( decl_engine.insert(method, Some(&method_decl_id)), )); } let typed_trait_decl = ty::TyTraitDecl { name: name.clone(), type_parameters: new_type_parameters, self_type: TypeParameter::Type(self_type_param), interface_surface: new_interface_surface, items: new_items, supertraits, visibility, attributes, call_path: CallPath::from(name).to_fullpath(ctx.engines(), ctx.namespace()), span, }; Ok(typed_trait_decl) }) } /// Retrieves the interface surface and implemented items for this trait. pub(crate) fn retrieve_interface_surface_and_implemented_items_for_type( &self, handler: &Handler, ctx: TypeCheckContext, type_id: TypeId, call_path: &CallPath, ) -> (InterfaceItemMap, ItemMap) { let mut interface_surface_item_refs: InterfaceItemMap = BTreeMap::new(); let mut impld_item_refs: ItemMap = BTreeMap::new(); let ty::TyTraitDecl { interface_surface, .. } = self; // Retrieve the interface surface for this trait. for item in interface_surface.iter() { match item { ty::TyTraitInterfaceItem::TraitFn(decl_ref) => { interface_surface_item_refs .insert((decl_ref.name().clone(), type_id), item.clone()); } ty::TyTraitInterfaceItem::Constant(decl_ref) => { interface_surface_item_refs .insert((decl_ref.name().clone(), type_id), item.clone()); } ty::TyTraitInterfaceItem::Type(decl_ref) => { interface_surface_item_refs .insert((decl_ref.name().clone(), type_id), item.clone()); } } } // Retrieve the implemented items for this type. 
for item in ctx .get_items_for_type_and_trait_name(handler, type_id, call_path) .into_iter() { match &item { ty::TyTraitItem::Fn(decl_ref) => { impld_item_refs.insert((decl_ref.name().clone(), type_id), item.clone()); } ty::TyTraitItem::Constant(decl_ref) => { impld_item_refs.insert((decl_ref.name().clone(), type_id), item.clone()); } ty::TyTraitItem::Type(decl_ref) => { impld_item_refs.insert((decl_ref.name().clone(), type_id), item.clone()); } }; } (interface_surface_item_refs, impld_item_refs) } /// Retrieves the interface surface, items, and implemented items for /// this trait. pub(crate) fn retrieve_interface_surface_and_items_and_implemented_items_for_type( &self, handler: &Handler, ctx: &TypeCheckContext, type_id: TypeId, call_path: &CallPath, generic_args: &[GenericArgument], ) -> (InterfaceItemMap, ItemMap, ItemMap) { let mut interface_surface_item_refs: InterfaceItemMap = BTreeMap::new(); let mut item_refs: ItemMap = BTreeMap::new(); let mut impld_item_refs: ItemMap = BTreeMap::new(); let ty::TyTraitDecl { interface_surface, items, type_parameters, .. } = self; let decl_engine = ctx.engines.de(); let engines = ctx.engines(); // Retrieve the interface surface for this trait. for item in interface_surface.iter() { match item { ty::TyTraitInterfaceItem::TraitFn(decl_ref) => { interface_surface_item_refs .insert((decl_ref.name().clone(), type_id), item.clone()); } ty::TyTraitInterfaceItem::Constant(decl_ref) => { interface_surface_item_refs .insert((decl_ref.name().clone(), type_id), item.clone()); } ty::TyTraitInterfaceItem::Type(decl_ref) => { interface_surface_item_refs .insert((decl_ref.name().clone(), type_id), item.clone()); } } } // Retrieve the trait items for this trait. 
for item in items.iter() { match item { ty::TyTraitItem::Fn(decl_ref) => { item_refs.insert((decl_ref.name().clone(), type_id), item.clone()); } ty::TyTraitItem::Constant(decl_ref) => { item_refs.insert((decl_ref.name().clone(), type_id), item.clone()); } ty::TyTraitItem::Type(decl_ref) => { item_refs.insert((decl_ref.name().clone(), type_id), item.clone()); } } } // Retrieve the implemented items for this type. let type_mapping = TypeSubstMap::from_type_parameters_and_type_arguments( type_parameters.iter().map(|t| { let t = t .as_type_parameter() .expect("only works with type parameters"); t.type_id }), generic_args.iter().map(|t| t.type_id()), ); for item in ctx .get_items_for_type_and_trait_name_and_trait_type_arguments( handler, type_id, call_path, generic_args, ) .into_iter() { match item { ty::TyTraitItem::Fn(decl_ref) => { let mut method = (*decl_engine.get_function(&decl_ref)).clone(); let name = method.name.clone(); let r = if method .subst(&SubstTypesContext::new( handler, engines, &type_mapping, !ctx.code_block_first_pass(), )) .has_changes() { let new_ref = decl_engine .insert( method, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref(), ) .with_parent(decl_engine, (*decl_ref.id()).into()); new_ref } else { decl_ref.clone() }; impld_item_refs.insert((name, type_id), TyTraitItem::Fn(r)); } ty::TyTraitItem::Constant(decl_ref) => { let mut const_decl = (*decl_engine.get_constant(&decl_ref)).clone(); let name = const_decl.call_path.suffix.clone(); let r = if const_decl .subst(&SubstTypesContext::new( handler, engines, &type_mapping, !ctx.code_block_first_pass(), )) .has_changes() { decl_engine.insert( const_decl, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref(), ) } else { decl_ref.clone() }; impld_item_refs.insert((name, type_id), TyTraitItem::Constant(r)); } ty::TyTraitItem::Type(decl_ref) => { let mut t = (*decl_engine.get_type(&decl_ref)).clone(); let name = t.name.clone(); let r = if t .subst(&SubstTypesContext::new( handler, engines, 
&type_mapping, !ctx.code_block_first_pass(), )) .has_changes() { decl_engine .insert(t, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref()) } else { decl_ref.clone() }; impld_item_refs.insert((name, type_id), TyTraitItem::Type(r)); } } } (interface_surface_item_refs, item_refs, impld_item_refs) } pub(crate) fn insert_interface_surface_and_items_into_namespace( &self, handler: &Handler, mut ctx: TypeCheckContext, trait_name: &CallPath, type_arguments: &[GenericArgument], type_id: TypeId, ) { let decl_engine = ctx.engines.de(); let engines = ctx.engines(); let ty::TyTraitDecl { interface_surface, items, type_parameters, .. } = self; let mut all_items = vec![]; // Retrieve the trait items for this trait. Transform them into the // correct typing for this impl block by using the type parameters from // the original trait declaration and the given type arguments. let type_mapping = TypeSubstMap::from_type_parameters_and_type_arguments( type_parameters.iter().map(|t| { let t = t .as_type_parameter() .expect("only works with type parameters"); t.type_id }), type_arguments.iter().map(|t| t.type_id()), ); let mut const_symbols = HashMap::<Ident, ty::TyDecl>::new(); for item in interface_surface.iter() { match item { ty::TyTraitInterfaceItem::TraitFn(decl_ref) => { let mut method = (*decl_engine.get_trait_fn(decl_ref)).clone(); method.subst(&SubstTypesContext::new( handler, engines, &type_mapping, !ctx.code_block_first_pass(), )); all_items.push(TyImplItem::Fn( decl_engine .insert(method.to_dummy_func(AbiMode::NonAbi, Some(type_id)), None) .with_parent(ctx.engines.de(), (*decl_ref.id()).into()), )); } ty::TyTraitInterfaceItem::Constant(decl_ref) => { let const_decl = decl_engine.get_constant(decl_ref); all_items.push(TyImplItem::Constant(decl_ref.clone())); let const_name = const_decl.call_path.suffix.clone(); const_symbols.insert( const_name, ty::TyDecl::ConstantDecl(ty::ConstantDecl { decl_id: *decl_ref.id(), }), ); } ty::TyTraitInterfaceItem::Type(decl_ref) => { 
all_items.push(TyImplItem::Type(decl_ref.clone())); } } } for item in items.iter() { match item { ty::TyTraitItem::Fn(decl_ref) => { let mut method = (*decl_engine.get_function(decl_ref)).clone(); method.subst(&SubstTypesContext::new( handler, engines, &type_mapping, !ctx.code_block_first_pass(), )); all_items.push(TyImplItem::Fn( ctx.engines .de() .insert( method, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref(), ) .with_parent(decl_engine, (*decl_ref.id()).into()), )); } ty::TyTraitItem::Constant(decl_ref) => { let mut const_decl = (*decl_engine.get_constant(decl_ref)).clone(); const_decl.subst(&SubstTypesContext::new( handler, engines, &type_mapping, !ctx.code_block_first_pass(), )); let const_name = const_decl.name().clone(); let const_has_value = const_decl.value.is_some(); let decl_id = decl_engine.insert( const_decl, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref(), ); all_items.push(TyImplItem::Constant(decl_id.clone())); // If this non-interface item has a value, then we want to overwrite the // the previously inserted constant symbol from the interface surface. if const_has_value { const_symbols.insert( const_name, ty::TyDecl::ConstantDecl(ty::ConstantDecl { decl_id: *decl_id.id(), }), ); } } ty::TyTraitItem::Type(decl_ref) => { let mut type_decl = (*decl_engine.get_type(decl_ref)).clone(); type_decl.subst(&SubstTypesContext::new( handler, engines, &type_mapping, !ctx.code_block_first_pass(), )); all_items.push(TyImplItem::Type(decl_engine.insert( type_decl, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref(), ))); } } } // TODO: This is a temporary symptom-fix for https://github.com/FuelLabs/sway/issues/7396. // Remove it once the underlying issue https://github.com/FuelLabs/sway/issues/7428 is fixed. let prev_const_shadowing_mode = ctx.const_shadowing_mode; ctx.const_shadowing_mode = crate::semantic_analysis::ConstShadowingMode::Allow; // Insert the constants into the namespace. 
for (name, decl) in const_symbols.into_iter() { let _ = ctx.insert_symbol(handler, name, decl); } ctx.const_shadowing_mode = prev_const_shadowing_mode; // Insert the methods of the trait into the namespace. // Specifically do not check for conflicting definitions because // this is just a temporary namespace for type checking and // these are not actual impl blocks. let interface_handler = Handler::default(); let _ = ctx.insert_trait_implementation( &interface_handler, trait_name.clone(), type_arguments.to_vec(), type_id, vec![], &all_items, &trait_name.span(), Some(self.span()), IsImplSelf::No, IsExtendingExistingImpl::No, IsImplInterfaceSurface::Yes, ); debug_assert!(!interface_handler.has_errors()); } } impl TypeCheckAnalysis for TyTraitDecl { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { for item in self.items.iter() { item.type_check_analyze(handler, ctx)?; } Ok(()) } } impl TypeCheckFinalization for TyTraitDecl { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { for item in self.items.iter_mut() { let _ = item.type_check_finalize(handler, ctx); } Ok(()) }) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/abi.rs
sway-core/src/semantic_analysis/ast_node/declaration/abi.rs
use std::collections::{HashMap, HashSet}; use sway_error::error::CompileError; use sway_types::{Ident, Named, Span, Spanned}; use crate::{ ast_elements::type_parameter::GenericTypeParameter, decl_engine::{ parsed_id::ParsedDeclId, DeclEngineGetParsedDeclId, DeclEngineInsert, DeclEngineInsertArc, DeclId, }, language::ty::{TyAbiDecl, TyFunctionDecl}, namespace::{IsExtendingExistingImpl, IsImplInterfaceSurface, IsImplSelf}, semantic_analysis::{ symbol_collection_context::SymbolCollectionContext, TypeCheckAnalysis, TypeCheckAnalysisContext, TypeCheckFinalization, TypeCheckFinalizationContext, }, Engines, }; use sway_error::handler::{ErrorEmitted, Handler}; use crate::{ language::{ parsed::*, ty::{self, TyImplItem, TyTraitItem}, CallPath, }, semantic_analysis::declaration::SupertraitOf, semantic_analysis::{ declaration::insert_supertraits_into_namespace, AbiMode, TypeCheckContext, }, TypeId, }; impl ty::TyAbiDecl { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl_id: &ParsedDeclId<AbiDeclaration>, ) -> Result<(), ErrorEmitted> { let abi_decl = engines.pe().get_abi(decl_id); let decl = Declaration::AbiDeclaration(*decl_id); ctx.insert_parsed_symbol(handler, engines, abi_decl.name.clone(), decl.clone())?; let _ = ctx.scoped(engines, abi_decl.span.clone(), Some(decl), |scoped_ctx| { abi_decl.interface_surface.iter().for_each(|item| { let _ = TyTraitItem::collect(handler, engines, scoped_ctx, item); }); abi_decl.methods.iter().for_each(|decl_id| { let _ = TyFunctionDecl::collect(handler, engines, scoped_ctx, decl_id); }); Ok(()) }); Ok(()) } pub(crate) fn type_check( handler: &Handler, ctx: TypeCheckContext, abi_decl: AbiDeclaration, ) -> Result<Self, ErrorEmitted> { let engines = ctx.engines(); let AbiDeclaration { name, interface_surface, supertraits, methods, span, attributes, } = abi_decl; // We don't want the user to waste resources by contract calling // themselves, and we don't want to do more work in the compiler, 
// so we don't support the case of calling a contract's own interface // from itself. This is by design. // The span of the `abi_decl` `name` points to the file (use site) in which // the ABI is getting declared, so we can use it as the `use_site_span`. let self_type_param = GenericTypeParameter::new_self_type(ctx.engines, name.span()); let self_type_id = self_type_param.type_id; let mod_path = ctx.namespace().current_mod_path().clone(); // A temporary namespace for checking within this scope. ctx.with_abi_mode(AbiMode::ImplAbiFn(name.clone(), None)) .with_self_type(Some(self_type_id)) .scoped(handler, Some(span.clone()), |ctx| { // Insert the "self" type param into the namespace. self_type_param.insert_self_type_into_namespace(handler, ctx.by_ref()); // Recursively make the interface surfaces and methods of the // supertraits available to this abi. insert_supertraits_into_namespace( handler, ctx.by_ref(), self_type_id, &supertraits, &SupertraitOf::Abi(span.clone()), )?; // Type check the interface surface. 
let mut new_interface_surface = vec![]; let mut ids: HashSet<Ident> = HashSet::default(); let error_on_shadowing_superabi_method = |method_name: &Ident, ctx: &mut TypeCheckContext| { if let Ok(superabi_impl_method_ref) = ctx.find_method_for_type( &Handler::default(), self_type_id, &mod_path, &method_name.clone(), ctx.type_annotation(), &[], None, ) { let superabi_impl_method = ctx.engines.de().get_function(&superabi_impl_method_ref); if let Some(ty::TyDecl::AbiDecl(abi_decl)) = &superabi_impl_method.implementing_type { let abi_decl = engines.de().get_abi(&abi_decl.decl_id); handler.emit_err(CompileError::AbiShadowsSuperAbiMethod { span: method_name.span(), superabi: abi_decl.name().clone(), }); } } }; for item in interface_surface.into_iter() { let decl_name = match item { TraitItem::TraitFn(decl_id) => { let method = engines.pe().get_trait_fn(&decl_id); // check that a super-trait does not define a method // with the same name as the current interface method error_on_shadowing_superabi_method(&method.name, ctx); let method = ty::TyTraitFn::type_check(handler, ctx.by_ref(), &method)?; for param in &method.parameters { if param.is_reference || param.is_mutable { handler.emit_err( CompileError::RefMutableNotAllowedInContractAbi { param_name: param.name.clone(), span: param.name.span(), }, ); } } new_interface_surface.push(ty::TyTraitInterfaceItem::TraitFn( ctx.engines.de().insert(method.clone(), Some(&decl_id)), )); method.name.clone() } TraitItem::Constant(decl_id) => { let const_decl = engines.pe().get_constant(&decl_id).as_ref().clone(); let const_decl = ty::TyConstantDecl::type_check(handler, ctx.by_ref(), const_decl)?; let decl_ref = ctx.engines.de().insert(const_decl.clone(), Some(&decl_id)); new_interface_surface .push(ty::TyTraitInterfaceItem::Constant(decl_ref.clone())); const_decl.call_path.suffix.clone() } TraitItem::Type(decl_id) => { let type_decl = engines.pe().get_trait_type(&decl_id).as_ref().clone(); 
handler.emit_err(CompileError::AssociatedTypeNotSupportedInAbi { span: type_decl.span.clone(), }); let type_decl = ty::TyTraitType::type_check(handler, ctx.by_ref(), type_decl)?; let decl_ref = ctx.engines().de().insert(type_decl.clone(), Some(&decl_id)); new_interface_surface .push(ty::TyTraitInterfaceItem::Type(decl_ref.clone())); type_decl.name } TraitItem::Error(_, _) => { continue; } }; if !ids.insert(decl_name.clone()) { handler.emit_err(CompileError::MultipleDefinitionsOfName { name: decl_name.clone(), span: decl_name.span(), }); } } // Type check the items. let mut new_items = vec![]; for method_id in methods.into_iter() { let method = engines.pe().get_function(&method_id); let method = ty::TyFunctionDecl::type_check( handler, ctx.by_ref(), &method, false, false, Some(self_type_param.type_id), ) .unwrap_or_else(|_| ty::TyFunctionDecl::error(&method)); error_on_shadowing_superabi_method(&method.name, ctx); for param in method.parameters.iter() { if param.is_reference || param.is_mutable { handler.emit_err(CompileError::RefMutableNotAllowedInContractAbi { param_name: param.name.clone(), span: param.name.span(), }); } } if !ids.insert(method.name.clone()) { handler.emit_err(CompileError::MultipleDefinitionsOfName { name: method.name.clone(), span: method.name.span(), }); } new_items.push(TyTraitItem::Fn( ctx.engines.de().insert(method, Some(&method_id)), )); } // Compared to regular traits, we do not insert recursively methods of ABI supertraits // into the interface surface, we do not want supertrait methods to be available to // the ABI user, only the contract methods can use supertrait methods let abi_decl = ty::TyAbiDecl { interface_surface: new_interface_surface, supertraits, items: new_items, name, span, attributes, }; abi_decl.forbid_const_generics(handler, engines)?; Ok(abi_decl) }) } pub(crate) fn insert_interface_surface_and_items_into_namespace( &self, handler: &Handler, self_decl_id: DeclId<ty::TyAbiDecl>, mut ctx: TypeCheckContext, type_id: 
TypeId, subabi_span: Option<Span>, ) -> Result<(), ErrorEmitted> { let decl_engine = ctx.engines.de(); let engines = ctx.engines(); let ty::TyAbiDecl { interface_surface, items, .. } = self; let mut all_items = vec![]; let (look_for_conflicting_abi_methods, subabi_span) = if let Some(subabi) = subabi_span { (true, subabi) } else { (false, Span::dummy()) }; let mod_path = ctx.namespace().current_mod_path().clone(); let mut const_symbols = HashMap::<Ident, ty::TyDecl>::new(); handler.scope(|handler| { for item in interface_surface.iter() { match item { ty::TyTraitInterfaceItem::TraitFn(decl_ref) => { let method = decl_engine.get_trait_fn(decl_ref); if look_for_conflicting_abi_methods { // looking for conflicting ABI methods for triangle-like ABI hierarchies if let Ok(superabi_method_ref) = ctx.find_method_for_type( &Handler::default(), type_id, &mod_path, &method.name.clone(), ctx.type_annotation(), &[], None, ) { let superabi_method = ctx.engines.de().get_function(&superabi_method_ref); if let Some(ty::TyDecl::AbiDecl(abi_decl)) = superabi_method.implementing_type.clone() { // rule out the diamond superABI hierarchy: // it's not an error if the "conflicting" methods // actually come from the same super-ABI // Top // / \ // Left Right // \ / // Bottom // if we are accumulating methods from Left and Right // to place it into Bottom we will encounter // the same method from Top in both Left and Right if self_decl_id != abi_decl.decl_id { let abi_decl = engines.de().get_abi(&abi_decl.decl_id); handler.emit_err( CompileError::ConflictingSuperAbiMethods { span: subabi_span.clone(), method_name: method.name.to_string(), superabi1: abi_decl.name().to_string(), superabi2: self.name.to_string(), }, ); } } } } all_items.push(TyImplItem::Fn( decl_engine .insert( method.to_dummy_func( AbiMode::ImplAbiFn(self.name.clone(), Some(self_decl_id)), Some(type_id), ), None, ) .with_parent(ctx.engines.de(), (*decl_ref.id()).into()), )); } ty::TyTraitInterfaceItem::Constant(decl_ref) => { 
let const_decl = decl_engine.get_constant(decl_ref); let const_name = const_decl.call_path.suffix.clone(); all_items.push(TyImplItem::Constant(decl_ref.clone())); const_symbols.insert( const_name, ty::TyDecl::ConstantDecl(ty::ConstantDecl { decl_id: *decl_ref.id(), }), ); } ty::TyTraitInterfaceItem::Type(decl_ref) => { all_items.push(TyImplItem::Type(decl_ref.clone())); } } } for item in items.iter() { match item { ty::TyTraitItem::Fn(decl_ref) => { let method = decl_engine.get_function(decl_ref); // check if we inherit the same impl method from different branches // XXX this piece of code can be abstracted out into a closure // and reused for interface methods if the issue of mutable ctx is solved if let Ok(superabi_impl_method_ref) = ctx.find_method_for_type( &Handler::default(), type_id, &mod_path, &method.name.clone(), ctx.type_annotation(), &[], None, ) { let superabi_impl_method = ctx.engines.de().get_function(&superabi_impl_method_ref); if let Some(ty::TyDecl::AbiDecl(abi_decl)) = superabi_impl_method.implementing_type.clone() { // allow the diamond superABI hierarchy if self_decl_id != abi_decl.decl_id { let abi_decl = engines.de().get_abi(&abi_decl.decl_id); handler.emit_err(CompileError::ConflictingSuperAbiMethods { span: subabi_span.clone(), method_name: method.name.to_string(), superabi1: abi_decl.name().to_string(), superabi2: self.name.to_string(), }); } } } all_items.push(TyImplItem::Fn( decl_engine .insert_arc( method, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref(), ) .with_parent(ctx.engines.de(), (*decl_ref.id()).into()), )); } ty::TyTraitItem::Constant(decl_ref) => { let const_decl = decl_engine.get_constant(decl_ref); let const_name = const_decl.name().clone(); let const_has_value = const_decl.value.is_some(); let decl_id = decl_engine.insert_arc( const_decl, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref(), ); all_items.push(TyImplItem::Constant(decl_id.clone())); // If this non-interface item has a value, then we want to 
overwrite the // the previously inserted constant symbol from the interface surface. if const_has_value { const_symbols.insert( const_name, ty::TyDecl::ConstantDecl(ty::ConstantDecl { decl_id: *decl_id.id(), }), ); } } ty::TyTraitItem::Type(decl_ref) => { let type_decl = decl_engine.get_type(decl_ref); all_items.push(TyImplItem::Type(decl_engine.insert_arc( type_decl, decl_engine.get_parsed_decl_id(decl_ref.id()).as_ref(), ))); } } } // Insert the constants into the namespace. for (name, decl) in const_symbols.into_iter() { let _ = ctx.insert_symbol(handler, name, decl); } // Insert the methods of the ABI into the namespace. // Specifically do not check for conflicting definitions because // this is just a temporary namespace for type checking and // these are not actual impl blocks. // We check that a contract method cannot call a contract method // from the same ABI later, during method application typechecking. let _ = ctx.insert_trait_implementation( &Handler::default(), CallPath::ident_to_fullpath(self.name.clone(), ctx.namespace()), vec![], type_id, vec![], &all_items, &self.span, Some(self.span()), IsImplSelf::No, IsExtendingExistingImpl::No, IsImplInterfaceSurface::No, ); Ok(()) }) } } impl TypeCheckAnalysis for TyAbiDecl { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { for item in self.items.iter() { let _ = item.type_check_analyze(handler, ctx); } Ok(()) }) } } impl TypeCheckFinalization for TyAbiDecl { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { for item in self.items.iter_mut() { let _ = item.type_check_finalize(handler, ctx); } Ok(()) }) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/function.rs
sway-core/src/semantic_analysis/ast_node/declaration/function.rs
mod function_parameter; use crate::{ decl_engine::{ parsed_id::ParsedDeclId, DeclEngineInsert as _, DeclId, DeclRefFunction, ParsedDeclEngineGet as _, }, language::{ parsed::*, ty::{self, ConstGenericDecl, TyCodeBlock, TyConstGenericDecl, TyDecl, TyFunctionDecl}, CallPath, CallPathType, Visibility, }, semantic_analysis::*, type_system::*, Engines, }; use ast_elements::type_parameter::GenericTypeParameter; use hashbrown::HashMap; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, warning::{CompileWarning, Warning}, }; use sway_types::{style::is_snake_case, Spanned}; use symbol_collection_context::SymbolCollectionContext; impl ty::TyFunctionDecl { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl_id: &ParsedDeclId<FunctionDeclaration>, ) -> Result<(), ErrorEmitted> { let fn_decl = engines.pe().get_function(decl_id); let decl = Declaration::FunctionDeclaration(*decl_id); let _ = ctx.insert_parsed_symbol(handler, engines, fn_decl.name.clone(), decl.clone()); // create a namespace for the function let _ = ctx.scoped(engines, fn_decl.span.clone(), Some(decl), |scoped_ctx| { let const_generic_parameters = fn_decl .type_parameters .iter() .filter_map(|x| x.as_const_parameter()) .filter_map(|x| x.id.as_ref()); for const_generic_parameter in const_generic_parameters { let const_generic_decl = engines.pe().get(const_generic_parameter); scoped_ctx.insert_parsed_symbol( handler, engines, const_generic_decl.name.clone(), Declaration::ConstGenericDeclaration(*const_generic_parameter), )?; } TyCodeBlock::collect(handler, engines, scoped_ctx, &fn_decl.body) }); Ok(()) } pub fn type_check( handler: &Handler, mut ctx: TypeCheckContext, fn_decl: &FunctionDeclaration, is_method: bool, is_in_impl_self: bool, implementing_for: Option<TypeId>, ) -> Result<Self, ErrorEmitted> { let mut ty_fn_decl = Self::type_check_signature( handler, ctx.by_ref(), fn_decl, is_method, is_in_impl_self, implementing_for, )?; 
Self::type_check_body(handler, ctx, fn_decl, &mut ty_fn_decl) } pub fn type_check_signature( handler: &Handler, mut ctx: TypeCheckContext, fn_decl: &FunctionDeclaration, is_method: bool, is_in_impl_self: bool, implementing_for: Option<TypeId>, ) -> Result<Self, ErrorEmitted> { let FunctionDeclaration { name, body: _, parameters, span, attributes, type_parameters, visibility, purity, where_clause, kind, .. } = fn_decl; let mut return_type = fn_decl.return_type.clone(); let type_engine = ctx.engines.te(); // If functions aren't allowed in this location, return an error. if ctx.functions_disallowed() { return Err(handler.emit_err(CompileError::Unimplemented { feature: "Declaring nested functions".to_string(), help: vec![], span: span.clone(), })); } // Warn against non-snake case function names. if !is_snake_case(name.as_str()) { handler.emit_warn(CompileWarning { span: name.span(), warning_content: Warning::NonSnakeCaseFunctionName { name: name.clone() }, }) } // create a namespace for the function ctx.by_ref() .with_const_shadowing_mode(ConstShadowingMode::Sequential) .disallow_functions() .scoped(handler, Some(span.clone()), |ctx| { // Type check the type parameters. 
let new_type_parameters = GenericTypeParameter::type_check_type_params( handler, ctx.by_ref(), type_parameters.clone(), None, )?; // const generic parameters let mut already_declared = HashMap::new(); let const_generic_parameters = type_parameters .iter() .filter_map(|x| x.as_const_parameter()); for const_generic in const_generic_parameters { let Some(id) = const_generic.id.as_ref() else { continue; }; let const_generic_decl = ctx.engines.pe().get(id); let decl_ref = ctx.engines.de().insert( TyConstGenericDecl { call_path: CallPath { prefixes: vec![], suffix: const_generic_decl.name.clone(), callpath_type: CallPathType::Ambiguous, }, span: const_generic_decl.span.clone(), return_type: const_generic_decl.ty, value: None, }, Some(id), ); if let Some(old) = already_declared .insert(const_generic_decl.name.clone(), const_generic.span.clone()) { handler.emit_err(CompileError::MultipleDefinitionsOfConstant { name: const_generic_decl.name.clone(), new: const_generic.span.clone(), old, }); } ctx.insert_symbol( handler, const_generic_decl.name.clone(), TyDecl::ConstGenericDecl(ConstGenericDecl { decl_id: *decl_ref.id(), }), )?; } // type check the function parameters, which will also insert them into the namespace let mut new_parameters = vec![]; handler.scope(|handler| { for parameter in parameters.iter() { new_parameters.push({ let param = match ty::TyFunctionParameter::type_check( handler, ctx.by_ref(), parameter.clone(), ) { Ok(val) => val, Err(_) => continue, }; param.insert_into_namespace(handler, ctx.by_ref()); param }); } Ok(()) })?; // type check the return type return_type.type_id = ctx .resolve_type( handler, return_type.type_id, &return_type.span, EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); let (visibility, is_contract_call) = if is_method { if is_in_impl_self { (*visibility, false) } else { (Visibility::Public, false) } } else { ( *visibility, matches!(ctx.abi_mode(), AbiMode::ImplAbiFn(..)), ) }; let 
call_path = CallPath::from(name.clone()).to_fullpath(ctx.engines(), ctx.namespace()); let function_decl = ty::TyFunctionDecl { name: name.clone(), body: <_>::default(), parameters: new_parameters, implementing_type: None, implementing_for, span: span.clone(), call_path, attributes: attributes.clone(), return_type, type_parameters: new_type_parameters, visibility, is_contract_call, purity: *purity, where_clause: where_clause.clone(), is_trait_method_dummy: false, is_type_check_finalized: false, kind: match kind { FunctionDeclarationKind::Default => ty::TyFunctionDeclKind::Default, FunctionDeclarationKind::Entry => ty::TyFunctionDeclKind::Entry, FunctionDeclarationKind::Test => ty::TyFunctionDeclKind::Test, FunctionDeclarationKind::Main => ty::TyFunctionDeclKind::Main, }, }; Ok(function_decl) }) } pub fn type_check_body( handler: &Handler, mut ctx: TypeCheckContext, fn_decl: &FunctionDeclaration, ty_fn_decl: &mut Self, ) -> Result<Self, ErrorEmitted> { // create a namespace for the function ctx.by_ref() .with_const_shadowing_mode(ConstShadowingMode::Sequential) .disallow_functions() .scoped(handler, Some(fn_decl.span.clone()), |ctx| { let FunctionDeclaration { body, .. } = fn_decl; let ty::TyFunctionDecl { parameters, return_type, type_parameters, .. } = ty_fn_decl; // Insert the previously type checked type parameters into the current namespace. // We insert all type parameter before the constraints because some constraints may depend on the parameters. for p in type_parameters.iter() { p.insert_into_namespace_self(handler, ctx.by_ref())?; } for p in type_parameters.iter() { p.insert_into_namespace_constraints(handler, ctx.by_ref())?; } // Insert the previously type checked function parameters into the current namespace. for p in parameters.iter() { p.insert_into_namespace(handler, ctx.by_ref()); } // type check the function body // // If there are no implicit block returns, then we do not want to type check them, so we // stifle the errors. 
If there _are_ implicit block returns, we want to type_check them. let mut ctx = ctx .by_ref() .with_help_text( "Function body's return type does not match up with its return type annotation.", ) .with_type_annotation(return_type.type_id) .with_function_type_annotation(return_type.type_id); let body = ty::TyCodeBlock::type_check(handler, ctx.by_ref(), body, true) .unwrap_or_else(|_err| ty::TyCodeBlock::default()); ty_fn_decl.body = body; ty_fn_decl.is_type_check_finalized = true; return_type.type_id.check_type_parameter_bounds( handler, ctx.by_ref(), &return_type.span, None, )?; Ok(ty_fn_decl.clone()) }) } } impl TypeCheckAnalysis for DeclId<TyFunctionDecl> { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { let node = ctx.get_node_for_fn_decl(self); if let Some(node) = node { ctx.node_stack.push(node); let item_fn = ctx.engines.de().get_function(self); let _ = item_fn.type_check_analyze(handler, ctx); ctx.node_stack.pop(); } Ok(()) }) } } impl TypeCheckAnalysis for DeclRefFunction { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { let node = ctx.get_node_for_fn_decl(self.id()); if let Some(node) = node { ctx.node_stack.push(node); let item_fn = ctx.engines.de().get_function(self); let _ = item_fn.type_check_analyze(handler, ctx); ctx.node_stack.pop(); } Ok(()) }) } } impl TypeCheckAnalysis for ty::TyFunctionDecl { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { self.body.type_check_analyze(handler, ctx) } } impl TypeCheckFinalization for ty::TyFunctionDecl { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { let _ = self.body.type_check_finalize(handler, ctx); Ok(()) }) } } #[test] fn 
test_function_selector_behavior() { use crate::ast_elements::type_argument::GenericTypeArgument; use crate::language::Visibility; use crate::Engines; use sway_types::{Ident, Span}; let engines = Engines::default(); let handler = Handler::default(); let decl = ty::TyFunctionDecl { purity: Default::default(), name: Ident::dummy(), implementing_type: None, implementing_for: None, body: ty::TyCodeBlock::default(), parameters: vec![], span: Span::dummy(), call_path: CallPath::from(Ident::dummy()), attributes: Default::default(), return_type: TypeId::from(0).into(), type_parameters: vec![], visibility: Visibility::Public, is_contract_call: false, where_clause: vec![], is_trait_method_dummy: false, is_type_check_finalized: true, kind: ty::TyFunctionDeclKind::Default, }; let selector_text = decl .to_selector_name(&handler, &engines) .expect("test failure"); assert_eq!(selector_text, "foo()".to_string()); let decl = ty::TyFunctionDecl { purity: Default::default(), name: Ident::new_with_override("bar".into(), Span::dummy()), implementing_type: None, implementing_for: None, body: ty::TyCodeBlock::default(), parameters: vec![ ty::TyFunctionParameter { name: Ident::dummy(), is_reference: false, is_mutable: false, mutability_span: Span::dummy(), type_argument: engines .te() .insert_string_array_without_annotations(&engines, 5) .into(), }, ty::TyFunctionParameter { name: Ident::new_no_span("baz".into()), is_reference: false, is_mutable: false, mutability_span: Span::dummy(), type_argument: GenericTypeArgument { type_id: engines.te().id_of_u32(), initial_type_id: engines .te() .insert_string_array_without_annotations(&engines, 5), span: Span::dummy(), call_path_tree: None, }, }, ], span: Span::dummy(), call_path: CallPath::from(Ident::dummy()), attributes: Default::default(), return_type: TypeId::from(0).into(), type_parameters: vec![], visibility: Visibility::Public, is_contract_call: false, where_clause: vec![], is_trait_method_dummy: false, is_type_check_finalized: true, kind: 
ty::TyFunctionDeclKind::Default, }; let selector_text = decl .to_selector_name(&handler, &engines) .expect("test failure"); assert_eq!(selector_text, "bar(str[5],u32)".to_string()); }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/storage.rs
sway-core/src/semantic_analysis/ast_node/declaration/storage.rs
use std::collections::HashMap; use crate::{ decl_engine::parsed_id::ParsedDeclId, fuel_prelude::fuel_tx::StorageSlot, ir_generation::{ const_eval::compile_constant_expression_to_constant, storage::serialize_to_storage_slots, }, language::{ parsed::StorageDeclaration, ty::{self, TyExpression, TyStorageField}, }, metadata::MetadataManager, semantic_analysis::{ symbol_collection_context::SymbolCollectionContext, TypeCheckAnalysis, TypeCheckAnalysisContext, TypeCheckFinalization, TypeCheckFinalizationContext, }, Engines, }; use fuel_vm::fuel_tx::Bytes32; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, warning::CompileWarning, }; use sway_ir::{ConstantValue, Context, Module}; use sway_types::{u256::U256, Spanned}; impl ty::TyStorageDecl { pub(crate) fn collect( _handler: &Handler, _engines: &Engines, _ctx: &mut SymbolCollectionContext, _decl_id: &ParsedDeclId<StorageDeclaration>, ) -> Result<(), ErrorEmitted> { Ok(()) } pub(crate) fn get_initialized_storage_slots( &self, handler: &Handler, engines: &Engines, context: &mut Context, md_mgr: &mut MetadataManager, module: Module, ) -> Result<Vec<StorageSlot>, ErrorEmitted> { handler.scope(|handler| { let mut slot_fields = HashMap::<Bytes32, TyStorageField>::new(); let storage_slots = self .fields .iter() .map(|f| { let slots = f.get_initialized_storage_slots(engines, context, md_mgr, module); // Check if slot with same key was already used and throw warning. 
if let Ok(slots) = &slots { for s in slots.iter() { if let Some(old_field) = slot_fields.insert(*s.key(), f.clone()) { handler.emit_warn(CompileWarning { span: f.span(), warning_content: sway_error::warning::Warning::DuplicatedStorageKey { first_field: (&old_field.name).into(), first_field_full_name: old_field.full_name(), first_field_key_is_compiler_generated: old_field .key_expression .is_none(), second_field: (&f.name).into(), second_field_full_name: f.full_name(), second_field_key_is_compiler_generated: f .key_expression .is_none(), key: format!("0x{:x}", s.key()), }, }) } } } slots }) .filter_map(|s| s.map_err(|e| handler.emit_err(e)).ok()) .flatten() .collect::<Vec<_>>(); Ok(storage_slots) }) } } impl ty::TyStorageField { pub(crate) fn get_initialized_storage_slots( &self, engines: &Engines, context: &mut Context, md_mgr: &mut MetadataManager, module: Module, ) -> Result<Vec<StorageSlot>, CompileError> { let key = Self::get_key_expression_const(&self.key_expression, engines, context, md_mgr, module)?; compile_constant_expression_to_constant( engines, context, md_mgr, module, None, None, &self.initializer, ) .map(|constant| { serialize_to_storage_slots( &constant, context, self.namespace_names .iter() .map(|i| i.as_str().to_string()) .chain(vec![self.name.as_str().to_string()]) .collect(), key, &constant.get_content(context).ty, ) }) } pub(crate) fn get_key_expression_const( key_expression: &Option<TyExpression>, engines: &Engines, context: &mut Context, md_mgr: &mut MetadataManager, module: Module, ) -> Result<Option<U256>, CompileError> { if let Some(key_expression) = key_expression { let const_key = compile_constant_expression_to_constant( engines, context, md_mgr, module, None, None, key_expression, )?; if let ConstantValue::B256(key) = const_key.get_content(context).value.clone() { Ok(Some(key)) } else { Err(CompileError::Internal( "Storage keys must have type \"b256\".", key_expression.span.clone(), )) } } else { Ok(None) } } } impl TypeCheckAnalysis for 
ty::TyStorageDecl { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { for field in self.fields.iter() { let _ = field.type_check_analyze(handler, ctx); } Ok(()) }) } } impl TypeCheckAnalysis for ty::TyStorageField { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { self.initializer.type_check_analyze(handler, ctx) } } impl TypeCheckFinalization for ty::TyStorageDecl { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { handler.scope(|handler| { for field in self.fields.iter_mut() { let _ = field.type_check_finalize(handler, ctx); } Ok(()) }) } } impl TypeCheckFinalization for ty::TyStorageField { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { self.initializer.type_check_finalize(handler, ctx) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/configurable.rs
sway-core/src/semantic_analysis/ast_node/declaration/configurable.rs
use std::collections::VecDeque; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, warning::{CompileWarning, Warning}, }; use sway_types::{style::is_screaming_snake_case, Spanned}; use symbol_collection_context::SymbolCollectionContext; use crate::{ ast_elements::{type_argument::GenericTypeArgument, type_parameter::GenericTypeParameter}, decl_engine::{ parsed_id::ParsedDeclId, DeclEngineGetParsedDeclId, DeclEngineInsert, ReplaceDecls, }, language::{ parsed::*, ty::{self, TyConfigurableDecl, TyExpression}, CallPath, CallPathType, }, semantic_analysis::*, EnforceTypeArguments, Engines, GenericArgument, SubstTypes, TypeBinding, TypeCheckTypeBinding, }; impl ty::TyConfigurableDecl { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl_id: &ParsedDeclId<ConfigurableDeclaration>, ) -> Result<(), ErrorEmitted> { let configurable_decl = engines.pe().get_configurable(decl_id); ctx.insert_parsed_symbol( handler, engines, configurable_decl.name.clone(), Declaration::ConfigurableDeclaration(*decl_id), )?; if let Some(value) = &configurable_decl.value { TyExpression::collect(handler, engines, ctx, value)?; } Ok(()) } pub fn type_check( handler: &Handler, mut ctx: TypeCheckContext, decl: ConfigurableDeclaration, ) -> Result<Self, ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); let ConfigurableDeclaration { name, span, mut type_ascription, value, attributes, visibility, block_keyword_span, } = decl; type_ascription.type_id = ctx .resolve_type( handler, type_ascription.type_id, &type_ascription.span, EnforceTypeArguments::No, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); // this subst is required to replace associated types, namely TypeInfo::TraitType. 
type_ascription.type_id.subst(&ctx.subst_ctx(handler)); if !is_screaming_snake_case(name.as_str()) { handler.emit_warn(CompileWarning { span: name.span(), warning_content: Warning::NonScreamingSnakeCaseConstName { name: name.clone() }, }) } // Configurables using encoding v1 will be encoded and must be type_checked into "slice" let (value, decode_fn) = if ctx.experimental.new_encoding { let mut ctx = ctx .by_ref() .with_type_annotation(type_engine.id_of_raw_slice()) .with_help_text("Configurables must evaluate to slices."); let value = value.map(|value| { ty::TyExpression::type_check(handler, ctx.by_ref(), &value) .unwrap_or_else(|err| ty::TyExpression::error(err, name.span(), engines)) }); let mut arguments = VecDeque::default(); arguments.push_back(engines.te().id_of_raw_slice()); arguments.push_back(engines.te().id_of_u64()); arguments.push_back(engines.te().id_of_raw_slice()); let value_span = value .as_ref() .map(|x| x.span.clone()) .unwrap_or_else(|| span.clone()); let abi_decode_in_place_handler = Handler::default(); let r = crate::TypeBinding::type_check( &mut TypeBinding::<CallPath> { inner: CallPath { prefixes: vec![], suffix: sway_types::Ident::new_with_override( "abi_decode_in_place".into(), value_span.clone(), ), callpath_type: CallPathType::Ambiguous, }, type_arguments: crate::TypeArgs::Regular(vec![GenericArgument::Type( GenericTypeArgument { type_id: type_ascription.type_id, initial_type_id: type_ascription.type_id, span: sway_types::Span::dummy(), call_path_tree: None, }, )]), span: value_span.clone(), }, &abi_decode_in_place_handler, ctx.by_ref(), ); // Map expected errors to more understandable ones handler.map_and_emit_errors_from(abi_decode_in_place_handler, |e| match e { CompileError::SymbolNotFound { .. 
} => { Some(CompileError::ConfigurableMissingAbiDecodeInPlace { span: block_keyword_span.clone(), }) } e => Some(e), })?; let (decode_fn_ref, _, _): (crate::decl_engine::DeclRefFunction, _, _) = r?; let decode_fn_id = *decode_fn_ref.id(); let mut decode_fn_decl = (*engines.de().get_function(&decode_fn_id)).clone(); let decl_mapping = GenericTypeParameter::gather_decl_mapping_from_trait_constraints( handler, ctx.by_ref(), &decode_fn_decl.type_parameters, decode_fn_decl.name.as_str(), &span, )?; decode_fn_decl.replace_decls(&decl_mapping, handler, &mut ctx)?; let decode_fn_ref = engines .de() .insert( decode_fn_decl, engines.de().get_parsed_decl_id(&decode_fn_id).as_ref(), ) .with_parent(engines.de(), decode_fn_id.into()); (value, Some(decode_fn_ref)) } else { // while configurables using encoding v0 will typed as the configurable type itself let mut ctx = ctx .by_ref() .with_type_annotation(type_ascription.type_id) .with_help_text( "This declaration's type annotation does not match up with the assigned \ expression's type.", ); let value = value.map(|value| { ty::TyExpression::type_check(handler, ctx.by_ref(), &value) .unwrap_or_else(|err| ty::TyExpression::error(err, name.span(), engines)) }); (value, None) }; let mut call_path: CallPath = name.into(); call_path = call_path.to_fullpath(engines, ctx.namespace()); Ok(ty::TyConfigurableDecl { call_path, attributes, return_type: type_ascription.type_id, type_ascription, span, value, decode_fn, visibility, }) } pub(crate) fn forbid_const_generics( &self, handler: &Handler, engines: &Engines, ) -> Result<(), ErrorEmitted> { if self.type_ascription.type_id.has_const_generics(engines) { Err( handler.emit_err(CompileError::ConstGenericNotSupportedHere { span: self.type_ascription.span.clone(), }), ) } else { Ok(()) } } } impl TypeCheckAnalysis for TyConfigurableDecl { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { if let Some(value) = self.value.as_ref() 
{ value.type_check_analyze(handler, ctx)?; } Ok(()) } } impl TypeCheckFinalization for TyConfigurableDecl { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { if let Some(value) = self.value.as_mut() { value.type_check_finalize(handler, ctx)?; } Ok(()) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/impl_trait.rs
sway-core/src/semantic_analysis/ast_node/declaration/impl_trait.rs
#![allow(clippy::mutable_key_type)] use std::{ collections::{BTreeMap, HashMap, HashSet}, sync::Arc, vec, }; use ast_elements::type_parameter::GenericTypeParameter; use itertools::Itertools; use sway_error::{ error::{CompileError, InterfaceName}, handler::{ErrorEmitted, Handler}, warning::{CompileWarning, Warning}, }; use sway_types::{Ident, Named, Span, Spanned}; use crate::{ decl_engine::{parsed_id::ParsedDeclId, *}, engine_threading::*, language::{ parsed::*, ty::{ self, ConstGenericDecl, ConstantDecl, TyConstGenericDecl, TyConstantDecl, TyDecl, TyFunctionDecl, TyImplItem, TyImplSelfOrTrait, TyTraitInterfaceItem, TyTraitItem, TyTraitType, }, *, }, namespace::{IsExtendingExistingImpl, IsImplInterfaceSurface, IsImplSelf, TraitMap}, semantic_analysis::{ symbol_collection_context::SymbolCollectionContext, AbiMode, ConstShadowingMode, TyNodeDepGraphNodeId, TypeCheckAnalysis, TypeCheckAnalysisContext, TypeCheckContext, TypeCheckFinalization, TypeCheckFinalizationContext, }, type_system::*, }; impl TyImplSelfOrTrait { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl_id: &ParsedDeclId<ImplSelfOrTrait>, ) -> Result<(), ErrorEmitted> { let impl_trait = engines.pe().get_impl_self_or_trait(decl_id); let decl = Declaration::ImplSelfOrTrait(*decl_id); ctx.insert_parsed_symbol( handler, engines, impl_trait.trait_name.suffix.clone(), decl.clone(), )?; let _ = ctx.scoped( engines, impl_trait.block_span.clone(), Some(decl), |scoped_ctx| { let const_generic_parameters = impl_trait .impl_type_parameters .iter() .filter_map(|x| x.as_const_parameter()) .filter_map(|x| x.id.as_ref()); for const_generic_parameter in const_generic_parameters { let const_generic_decl = engines.pe().get(const_generic_parameter); scoped_ctx.insert_parsed_symbol( handler, engines, const_generic_decl.name.clone(), Declaration::ConstGenericDeclaration(*const_generic_parameter), )?; } impl_trait.items.iter().for_each(|item| match item { ImplItem::Fn(decl_id) 
=> { let _ = TyFunctionDecl::collect(handler, engines, scoped_ctx, decl_id); } ImplItem::Constant(decl_id) => { let _ = TyConstantDecl::collect(handler, engines, scoped_ctx, decl_id); } ImplItem::Type(decl_id) => { let _ = TyTraitType::collect(handler, engines, scoped_ctx, decl_id); } }); Ok(()) }, ); Ok(()) } pub(crate) fn type_check_impl_trait( handler: &Handler, mut ctx: TypeCheckContext, impl_trait: ImplSelfOrTrait, ) -> Result<Self, ErrorEmitted> { // If the impl trait represents an attempt to explicit implement a marker trait // we will emit an error, but do not want to bail out. We still want to check // the particular explicit implementation for errors, which will be done in the // remaining part of the type check. // At the very end of the type checking, we again inspect the result of this check // and return error if there was one. let is_marker_trait_explicit_impl_result = handler.scope(|handler| { let resolved_decl = ctx.resolve_call_path(handler, &impl_trait.trait_name)?; let trait_decl = match &resolved_decl { TyDecl::TraitDecl(decl) => ctx.engines.de().get_trait(&decl.decl_id), // This should never be the case, but even if it happens, we just return `Ok` and continue // with the type checking that anyhow needs to handle it. _ => return Ok(()), }; // Explicit impl means it is not autogenerated by the compiler and is not in the "std::marker" module. if trait_decl.is_marker_trait() && !(ctx.namespace.current_module().is_std_marker_module() || impl_trait.is_autogenerated(ctx.engines.se())) { Err( handler.emit_err(CompileError::MarkerTraitExplicitlyImplemented { marker_trait_full_name: trait_decl .call_path .to_string_with_args(ctx.engines, &impl_trait.trait_type_arguments), span: impl_trait.trait_name.span(), }), ) } else { Ok(()) } }); let ImplSelfOrTrait { impl_type_parameters, trait_name, mut trait_type_arguments, trait_decl_ref: _, mut implementing_for, items, block_span, .. 
} = impl_trait; let type_engine = ctx.engines.te(); let decl_engine = ctx.engines.de(); let engines = ctx.engines(); // Create a new type parameter for the Self type. // For the `use_site_span` of the self type parameter we take the `block_span`. // This is the span of the whole impl trait and block and thus, points to // the code in the source file in which the self type is used in the implementation. let self_type_use_site_span = block_span.clone(); let self_type_param = GenericTypeParameter::new_self_type(engines, self_type_use_site_span.clone()); let self_type_id = self_type_param.type_id; // create a namespace for the impl ctx.by_ref() .with_const_shadowing_mode(ConstShadowingMode::ItemStyle) .with_self_type(Some(self_type_id)) .allow_functions() .scoped(handler, Some(block_span.clone()), |ctx| { let const_generic_parameters = impl_type_parameters .iter() .filter_map(|x| x.as_const_parameter()) .filter_map(|x| x.id.as_ref()); for const_generic_decl_id in const_generic_parameters { let const_generic_decl = engines.pe().get(const_generic_decl_id); let decl_ref = engines.de().insert( TyConstGenericDecl { call_path: CallPath { prefixes: vec![], suffix: const_generic_decl.name.clone(), callpath_type: CallPathType::Ambiguous, }, span: const_generic_decl.span.clone(), return_type: const_generic_decl.ty, value: None, }, Some(const_generic_decl_id), ); ctx.insert_symbol( handler, const_generic_decl.name.clone(), TyDecl::ConstGenericDecl(ConstGenericDecl { decl_id: *decl_ref.id(), }), )?; } // Type check the type parameters let new_impl_type_parameters = GenericTypeParameter::type_check_type_params( handler, ctx.by_ref(), impl_type_parameters, Some(self_type_param), )?; // resolve the types of the trait type arguments for type_arg in trait_type_arguments.iter_mut() { *type_arg.type_id_mut() = ctx.resolve_type( handler, type_arg.type_id(), &type_arg.span(), EnforceTypeArguments::Yes, None, )?; } // type check the type that we are implementing for implementing_for.type_id 
= ctx.resolve_type( handler, implementing_for.type_id, &implementing_for.span(), EnforceTypeArguments::Yes, None, )?; // check to see if this type is supported in impl blocks type_engine .get(implementing_for.type_id) .expect_is_supported_in_impl_blocks_self( handler, Some(&trait_name.suffix), &implementing_for.span(), )?; // check for unconstrained type parameters check_for_unconstrained_type_parameters( handler, engines, &new_impl_type_parameters, &trait_type_arguments, implementing_for.type_id, )?; // Unify the "self" type param and the type that we are implementing for handler.scope(|h| { type_engine.unify_with_self( h, engines, implementing_for.type_id, self_type_id, &implementing_for.span(), "", || None, ); Ok(()) })?; // Update the context let mut ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(type_engine.new_unknown()) .with_self_type(Some(implementing_for.type_id)); let impl_trait = match ctx.resolve_call_path(handler, &trait_name).ok() { Some(ty::TyDecl::TraitDecl(ty::TraitDecl { decl_id, .. })) => { let mut trait_decl = (*decl_engine.get_trait(&decl_id)).clone(); // the following essentially is needed to map `Self` to `implementing_for` // during trait decl monomorphization trait_decl .type_parameters .push(trait_decl.self_type.clone()); trait_type_arguments.push(GenericArgument::Type(implementing_for.clone())); // monomorphize the trait declaration ctx.monomorphize( handler, &mut trait_decl, &mut trait_type_arguments, BTreeMap::new(), EnforceTypeArguments::Yes, &trait_name.span(), )?; // restore type parameters and type arguments trait_decl.type_parameters.pop(); trait_type_arguments.pop(); // Insert the interface surface and methods from this trait into // the namespace. 
trait_decl.insert_interface_surface_and_items_into_namespace( handler, ctx.by_ref(), &trait_name, &trait_type_arguments, implementing_for.type_id, ); let (new_items, supertrait_items) = type_check_trait_implementation( handler, ctx.by_ref(), implementing_for.type_id, &new_impl_type_parameters, &trait_decl.type_parameters, &trait_type_arguments, &trait_decl.supertraits, &trait_decl.interface_surface, &trait_decl.items, &items, &trait_name, &trait_decl.span(), &block_span, false, )?; ty::TyImplSelfOrTrait { impl_type_parameters: new_impl_type_parameters, trait_name: trait_name.clone(), trait_type_arguments, trait_decl_ref: Some(DeclRef::new( trait_decl.name.clone(), decl_id.into(), trait_decl.span.clone(), )), span: block_span, items: new_items, supertrait_items, implementing_for, } } Some(ty::TyDecl::AbiDecl(ty::AbiDecl { decl_id, .. })) => { // if you are comparing this with the `impl_trait` branch above, note that // there are no type arguments here because we don't support generic types // in contract ABIs yet (or ever?) due to the complexity of communicating // the ABI layout in the descriptor file. let abi = decl_engine.get_abi(&decl_id); if !type_engine.get(implementing_for.type_id).eq( &TypeInfo::Contract, &PartialEqWithEnginesContext::new(engines), ) { handler.emit_err(CompileError::ImplAbiForNonContract { span: implementing_for.span(), ty: engines.help_out(implementing_for.type_id).to_string(), }); } let self_type_param = GenericTypeParameter::new_self_type(engines, self_type_use_site_span); // Unify the "self" type param from the abi declaration with // the type that we are implementing for. handler.scope(|h| { type_engine.unify_with_self( h, engines, implementing_for.type_id, self_type_param.type_id, &implementing_for.span(), "", || None, ); Ok(()) })?; let mut ctx = ctx.with_abi_mode(AbiMode::ImplAbiFn(abi.name.clone(), None)); // Insert the interface surface and methods from this trait into // the namespace. 
let _ = abi.insert_interface_surface_and_items_into_namespace( handler, decl_id, ctx.by_ref(), implementing_for.type_id, None, ); let (new_items, supertrait_items) = type_check_trait_implementation( handler, ctx.by_ref(), implementing_for.type_id, &[], // this is empty because abi definitions don't support generics, &[], // this is empty because abi definitions don't support generics, &[], // this is empty because abi definitions don't support generics, &abi.supertraits, &abi.interface_surface, &abi.items, &items, &trait_name, &abi.span(), &block_span, true, )?; // Check that the contract doesn't have selector collisions let _ = check_for_function_selector_collisions( handler, ctx.by_ref(), &new_items, ); ty::TyImplSelfOrTrait { impl_type_parameters: vec![], // this is empty because abi definitions don't support generics trait_name, trait_type_arguments: vec![], // this is empty because abi definitions don't support generics trait_decl_ref: Some(DeclRef::new( abi.name.clone(), decl_id.into(), abi.span.clone(), )), span: block_span, items: new_items, supertrait_items, implementing_for, } } Some(_) | None => { return Err(handler.emit_err(CompileError::UnknownTrait { name: trait_name.suffix.clone(), span: trait_name.span(), })); } }; // If there was no error on explicit implementation of a marker trait, // return the type-checked trait. Otherwise, return the emitted error. match is_marker_trait_explicit_impl_result { Ok(_) => Ok(impl_trait), Err(err) => Err(err), } }) } pub(crate) fn type_check_impl_self( handler: &Handler, ctx: TypeCheckContext, parsed_decl_id: &ParsedDeclId<ImplSelfOrTrait>, impl_self: ImplSelfOrTrait, ) -> Result<ty::TyDecl, ErrorEmitted> { let ImplSelfOrTrait { impl_type_parameters, mut implementing_for, items, block_span, .. 
} = impl_self; let type_engine = ctx.engines.te(); let decl_engine = ctx.engines.de(); let engines = ctx.engines(); // create the namespace for the impl ctx.with_const_shadowing_mode(ConstShadowingMode::ItemStyle) .allow_functions() .scoped(handler, Some(block_span.clone()), |ctx| { let const_generic_parameters = impl_type_parameters .iter() .filter_map(|x| x.as_const_parameter()) .filter_map(|x| x.id.as_ref()); for const_generic_decl_id in const_generic_parameters { let const_generic_decl = engines.pe().get(const_generic_decl_id); let decl_ref = engines.de().insert( TyConstGenericDecl { call_path: CallPath { prefixes: vec![], suffix: const_generic_decl.name.clone(), callpath_type: CallPathType::Ambiguous, }, span: const_generic_decl.span.clone(), return_type: const_generic_decl.ty, value: None, }, Some(const_generic_decl_id), ); ctx.insert_symbol( handler, const_generic_decl.name.clone(), TyDecl::ConstGenericDecl(ConstGenericDecl { decl_id: *decl_ref.id(), }), )?; } // Create a new type parameter for the self type. let self_type_param = // Same as with impl trait or ABI, we take the `block_span` as the `use_site_span` // of the self type. GenericTypeParameter::new_self_type(engines, block_span.clone()); let self_type_id = self_type_param.type_id; // create the trait name let suffix = match &&*type_engine.get(implementing_for.type_id) { TypeInfo::Custom { qualified_call_path: call_path, .. } => call_path.call_path.suffix.clone(), _ => Ident::new_with_override("r#Self".into(), implementing_for.span.clone()), }; let trait_name = CallPath::ident_to_fullpath(suffix, ctx.namespace()); // Type check the type parameters. 
let new_impl_type_parameters = GenericTypeParameter::type_check_type_params( handler, ctx.by_ref(), impl_type_parameters, Some(self_type_param), )?; // type check the type that we are implementing for implementing_for.type_id = ctx.resolve_type( handler, implementing_for.type_id, &implementing_for.span, EnforceTypeArguments::Yes, None, )?; // check to see if this type is supported in impl blocks type_engine .get(implementing_for.type_id) .expect_is_supported_in_impl_blocks_self( handler, None, &implementing_for.span(), )?; // check for unconstrained type parameters check_for_unconstrained_type_parameters( handler, engines, &new_impl_type_parameters, &[], implementing_for.type_id, )?; // Disallow inherent implementations for types defined outside the current package let current_pkg = ctx.namespace().current_package_ref(); let is_external = match &*type_engine.get_unaliased(implementing_for.type_id) { TypeInfo::Struct(decl_id) => { let s = decl_engine.get_struct(decl_id); let pkg_name = s.call_path.prefixes.first().map(|p| p.as_str()); pkg_name .map(|name| name != current_pkg.name().as_str()) .unwrap_or_default() } TypeInfo::Enum(decl_id) => { let e = decl_engine.get_enum(decl_id); let pkg_name = e.call_path.prefixes.first().map(|p| p.as_str()); pkg_name .map(|name| name != current_pkg.name().as_str()) .unwrap_or_default() } _ => false, }; // Temporary workaround: allow inherent impls on `std::storage::storage_key::StorageKey<_>`. 
let is_storage_key_in_std = match &*type_engine.get_unaliased(implementing_for.type_id) { TypeInfo::Struct(decl_id) => { let s = decl_engine.get_struct(decl_id); s.call_path.suffix.as_str() == "StorageKey" && s.call_path.prefixes.len() == 3 && s.call_path.prefixes[0].as_str() == "std" && s.call_path.prefixes[1].as_str() == "storage" && s.call_path.prefixes[2].as_str() == "storage_key" } _ => false, }; if is_external && !is_storage_key_in_std { let type_name = engines.help_out(implementing_for.type_id).to_string(); let type_definition_span = match &*type_engine .get_unaliased(implementing_for.type_id) { TypeInfo::Struct(decl_id) => { Some(decl_engine.get_struct(decl_id).span.clone()) } TypeInfo::Enum(decl_id) => Some(decl_engine.get_enum(decl_id).span.clone()), _ => None, }; handler.emit_warn(CompileWarning { span: implementing_for.span(), warning_content: Warning::InherentImplForExternalType { type_name, type_definition_span, }, }); return Err(handler.cancel()); } implementing_for.type_id.check_type_parameter_bounds( handler, ctx.by_ref(), &implementing_for.span(), None, )?; // Unify the "self" type param and the type that we are implementing for handler.scope(|h| { type_engine.unify_with_self( h, engines, implementing_for.type_id, self_type_id, &implementing_for.span(), "", || None, ); Ok(()) })?; let mut ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(type_engine.new_unknown()); // type check the items inside of the impl block let mut new_items = vec![]; handler.scope(|handler| { for item in items.iter() { match item { ImplItem::Fn(id) => { let fn_decl = engines.pe().get_function(id); let Ok(fn_decl) = ty::TyFunctionDecl::type_check_signature( handler, ctx.by_ref(), &fn_decl, true, true, Some(implementing_for.type_id), ) else { continue; }; new_items .push(TyImplItem::Fn(decl_engine.insert(fn_decl, Some(id)))); } ImplItem::Constant(decl_id) => { let const_decl = engines.pe().get_constant(decl_id).as_ref().clone(); let const_decl = match 
ty::TyConstantDecl::type_check( handler, ctx.by_ref(), const_decl, ) { Ok(res) => res, Err(_) => continue, }; let decl_ref = decl_engine.insert(const_decl, Some(decl_id)); new_items.push(TyImplItem::Constant(decl_ref.clone())); ctx.insert_symbol( handler, decl_ref.name().clone(), ty::TyDecl::ConstantDecl(ty::ConstantDecl { decl_id: *decl_ref.id(), }), )?; } ImplItem::Type(decl_id) => { let type_decl = engines.pe().get_trait_type(decl_id).as_ref().clone(); let type_decl = match ty::TyTraitType::type_check( handler, ctx.by_ref(), type_decl.clone(), ) { Ok(res) => res, Err(_) => continue, }; let decl_ref = decl_engine.insert(type_decl, Some(decl_id)); new_items.push(TyImplItem::Type(decl_ref.clone())); } } } let mut impl_trait = ty::TyImplSelfOrTrait { impl_type_parameters: new_impl_type_parameters, trait_name, trait_type_arguments: vec![], // this is empty because impl self's don't support generics on the "Self" trait, trait_decl_ref: None, span: block_span, items: new_items, supertrait_items: vec![], implementing_for, }; ctx.insert_trait_implementation( handler, impl_trait.trait_name.clone(), impl_trait.trait_type_arguments.clone(), impl_trait.implementing_for.type_id, impl_trait.impl_type_parameters.clone(), &impl_trait.items, &impl_trait.span, impl_trait .trait_decl_ref .as_ref() .map(|decl_ref| decl_ref.decl_span().clone()), IsImplSelf::Yes, IsExtendingExistingImpl::No, IsImplInterfaceSurface::No, )?; let new_items = &impl_trait.items; for (item, new_item) in items.clone().into_iter().zip(new_items) { match (item, new_item) { (ImplItem::Fn(fn_decl_id), TyTraitItem::Fn(decl_ref)) => { let fn_decl = engines.pe().get_function(&fn_decl_id); let mut ty_fn_decl = (*decl_engine.get_function(decl_ref.id())).clone(); let new_ty_fn_decl = match ty::TyFunctionDecl::type_check_body( handler, ctx.by_ref(), &fn_decl, &mut ty_fn_decl, ) { Ok(res) => res, Err(_) => continue, }; decl_engine.replace(*decl_ref.id(), new_ty_fn_decl); } (ImplItem::Constant(_const_decl), 
TyTraitItem::Constant(_decl_ref)) => { // Already processed. } (ImplItem::Type(_type_decl), TyTraitItem::Type(_decl_ref)) => { // Already processed. } _ => { handler.emit_err(CompileError::Internal(
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/mod.rs
sway-core/src/semantic_analysis/ast_node/declaration/mod.rs
mod abi; pub mod auto_impl; mod configurable; mod constant; #[allow(clippy::module_inception)] mod declaration; mod r#enum; mod function; mod impl_trait; mod storage; mod r#struct; mod supertrait; mod r#trait; mod trait_fn; mod trait_type; mod type_alias; mod variable; pub(crate) use supertrait::*;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/constant.rs
sway-core/src/semantic_analysis/ast_node/declaration/constant.rs
use sway_error::{ handler::{ErrorEmitted, Handler}, warning::{CompileWarning, Warning}, }; use sway_types::{style::is_screaming_snake_case, Spanned}; use symbol_collection_context::SymbolCollectionContext; use crate::{ decl_engine::parsed_id::ParsedDeclId, language::{ parsed::{self, *}, ty::{self, TyConstantDecl, TyExpression}, CallPath, }, semantic_analysis::*, EnforceTypeArguments, Engines, SubstTypes, TypeInfo, }; impl ty::TyConstantDecl { pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, decl_id: &ParsedDeclId<ConstantDeclaration>, ) -> Result<(), ErrorEmitted> { let constant_decl = engines.pe().get_constant(decl_id); ctx.insert_parsed_symbol( handler, engines, constant_decl.name.clone(), Declaration::ConstantDeclaration(*decl_id), )?; if let Some(value) = &constant_decl.value { TyExpression::collect(handler, engines, ctx, value)?; } Ok(()) } pub fn type_check( handler: &Handler, mut ctx: TypeCheckContext, decl: ConstantDeclaration, ) -> Result<Self, ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); let ConstantDeclaration { name, span, mut type_ascription, value, attributes, visibility, } = decl.clone(); type_ascription.type_id = ctx .resolve_type( handler, type_ascription.type_id, &type_ascription.span(), EnforceTypeArguments::No, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); // this subst is required to replace associated types, namely TypeInfo::TraitType. 
type_ascription.type_id.subst(&ctx.subst_ctx(handler)); if !is_screaming_snake_case(name.as_str()) { handler.emit_warn(CompileWarning { span: name.span(), warning_content: Warning::NonScreamingSnakeCaseConstName { name: name.clone() }, }) } let mut ctx = ctx .by_ref() .with_type_annotation(type_ascription.type_id) .with_help_text( "This declaration's type annotation does not match up with the assigned \ expression's type.", ); let value = value.map(|value| { ty::TyExpression::type_check(handler, ctx.by_ref(), &value) .unwrap_or_else(|err| ty::TyExpression::error(err, name.span(), engines)) }); // Integers are special in the sense that we can't only rely on the type of `expression` // to get the type of the variable. The type of the variable *has* to follow // `type_ascription` if `type_ascription` is a concrete integer type that does not // conflict with the type of `expression` (i.e. passes the type checking above). let return_type = match &*type_engine.get(type_ascription.type_id) { TypeInfo::UnsignedInteger(_) => type_ascription.type_id, _ => match &value { Some(value) => value.return_type, None => type_ascription.type_id, }, }; let mut call_path: CallPath = name.into(); call_path = call_path.to_fullpath(engines, ctx.namespace()); Ok(ty::TyConstantDecl { call_path, attributes, return_type, type_ascription, span, value, visibility, }) } /// Used to create a stubbed out constant when the constant fails to /// compile, preventing cascading namespace errors. pub(crate) fn error(engines: &Engines, decl: parsed::ConstantDeclaration) -> TyConstantDecl { let type_engine = engines.te(); let parsed::ConstantDeclaration { name, span, visibility, type_ascription, .. 
} = decl; let call_path: CallPath = name.into(); TyConstantDecl { call_path, span, attributes: Default::default(), return_type: type_engine.new_unknown(), type_ascription, value: None, visibility, } } } impl TypeCheckAnalysis for TyConstantDecl { fn type_check_analyze( &self, handler: &Handler, ctx: &mut TypeCheckAnalysisContext, ) -> Result<(), ErrorEmitted> { if let Some(value) = self.value.as_ref() { value.type_check_analyze(handler, ctx)?; } Ok(()) } } impl TypeCheckFinalization for TyConstantDecl { fn type_check_finalize( &mut self, handler: &Handler, ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { if let Some(value) = self.value.as_mut() { value.type_check_finalize(handler, ctx)?; } Ok(()) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/trait_fn.rs
sway-core/src/semantic_analysis/ast_node/declaration/trait_fn.rs
use sway_types::Spanned;

use crate::{
    decl_engine::{parsed_id::ParsedDeclId, DeclId},
    language::{
        parsed::{self, Declaration, TraitFn},
        ty, CallPath, Visibility,
    },
    semantic_analysis::symbol_collection_context::SymbolCollectionContext,
    Engines,
};
use sway_error::handler::{ErrorEmitted, Handler};

use crate::{
    semantic_analysis::{AbiMode, TypeCheckContext},
    type_system::*,
};

impl ty::TyTraitFn {
    /// Symbol-collection pass for a trait function: registers the function's
    /// name in the enclosing scope and opens a (currently empty) child scope
    /// spanning the declaration.
    pub(crate) fn collect(
        handler: &Handler,
        engines: &Engines,
        ctx: &mut SymbolCollectionContext,
        decl_id: &ParsedDeclId<TraitFn>,
    ) -> Result<(), ErrorEmitted> {
        let trait_fn = engines.pe().get_trait_fn(decl_id);
        let decl = Declaration::TraitFnDeclaration(*decl_id);
        // Make the trait fn visible under its name in the enclosing scope.
        ctx.insert_parsed_symbol(handler, engines, trait_fn.name.clone(), decl.clone())?;
        // The child scope collects nothing yet; the result is intentionally ignored.
        let _ = ctx.scoped(engines, trait_fn.span.clone(), Some(decl), |_scoped_ctx| {
            Ok(())
        });
        Ok(())
    }

    /// Type checks a parsed trait function signature, producing the typed
    /// [ty::TyTraitFn]. Parameters that fail to type check are skipped
    /// (their errors have already been emitted via `handler`) so the rest
    /// of the signature can still be checked.
    pub(crate) fn type_check(
        handler: &Handler,
        mut ctx: TypeCheckContext,
        trait_fn: &parsed::TraitFn,
    ) -> Result<ty::TyTraitFn, ErrorEmitted> {
        let parsed::TraitFn {
            name,
            span,
            purity,
            parameters,
            return_type,
            attributes,
        } = trait_fn;

        let type_engine = ctx.engines.te();

        // Create a namespace for the trait function.
        ctx.by_ref().scoped(handler, Some(span.clone()), |ctx| {
            // TODO: when we add type parameters to trait fns, type check them here

            // Type check the parameters.
            let mut typed_parameters = vec![];
            for param in parameters.iter() {
                typed_parameters.push(
                    match ty::TyFunctionParameter::type_check_interface_parameter(
                        handler,
                        ctx.by_ref(),
                        param,
                    ) {
                        Ok(res) => res,
                        // Skip the broken parameter; errors already live in `handler`.
                        Err(_) => continue,
                    },
                );
            }

            // Type check the return type.
            let mut new_return_type = return_type.clone();
            new_return_type.type_id = ctx
                .resolve_type(
                    handler,
                    return_type.type_id,
                    &return_type.span,
                    EnforceTypeArguments::Yes,
                    None,
                )
                // On resolution failure, substitute the error-recovery type so
                // type checking can continue instead of aborting.
                .unwrap_or_else(|err| type_engine.id_of_error_recovery(err));

            let trait_fn = ty::TyTraitFn {
                name: name.clone(),
                span: span.clone(),
                parameters: typed_parameters,
                return_type: new_return_type,
                purity: *purity,
                attributes: attributes.clone(),
            };

            Ok(trait_fn)
        })
    }

    /// This function is used in trait declarations to insert "placeholder"
    /// functions in the methods. This allows the methods to use functions
    /// declared in the interface surface.
    pub(crate) fn to_dummy_func(
        &self,
        abi_mode: AbiMode,
        implementing_for: Option<TypeId>,
    ) -> ty::TyFunctionDecl {
        ty::TyFunctionDecl {
            purity: self.purity,
            name: self.name.clone(),
            // Placeholder body: the dummy function carries no statements.
            body: <_>::default(),
            parameters: self.parameters.clone(),
            implementing_type: match &abi_mode {
                AbiMode::ImplAbiFn(_abi_name, abi_decl_id) => {
                    // ABI and their super-ABI methods cannot have the same names,
                    // so in order to provide meaningful error messages if this condition
                    // is violated, we need to keep track of ABI names before we can
                    // provide type-checked `AbiDecl`s
                    Some(ty::TyDecl::AbiDecl(ty::AbiDecl {
                        decl_id: abi_decl_id.unwrap_or(DeclId::dummy()),
                    }))
                }
                AbiMode::NonAbi => None,
            },
            implementing_for,
            span: self.name.span(),
            call_path: CallPath::from(self.name.clone()),
            attributes: self.attributes.clone(),
            return_type: self.return_type.clone(),
            visibility: Visibility::Public,
            type_parameters: vec![],
            // Any ABI mode means the dummy represents a contract call.
            is_contract_call: matches!(abi_mode, AbiMode::ImplAbiFn(..)),
            where_clause: vec![],
            is_trait_method_dummy: true,
            is_type_check_finalized: true,
            kind: ty::TyFunctionDeclKind::Default,
        }
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/type_alias.rs
sway-core/src/semantic_analysis/ast_node/declaration/type_alias.rs
use crate::{
    decl_engine::parsed_id::ParsedDeclId,
    language::{
        parsed::{Declaration, TypeAliasDeclaration},
        ty::TyTypeAliasDecl,
    },
    semantic_analysis::{
        symbol_collection_context::SymbolCollectionContext, TypeCheckFinalization,
        TypeCheckFinalizationContext,
    },
    Engines,
};
use sway_error::handler::{ErrorEmitted, Handler};

impl TyTypeAliasDecl {
    /// Symbol-collection pass for a type alias: registers the alias name in
    /// the current scope so later passes can resolve references to it.
    pub(crate) fn collect(
        handler: &Handler,
        engines: &Engines,
        ctx: &mut SymbolCollectionContext,
        decl_id: &ParsedDeclId<TypeAliasDeclaration>,
    ) -> Result<(), ErrorEmitted> {
        let alias_name = engines.pe().get_type_alias(decl_id).name.clone();
        ctx.insert_parsed_symbol(
            handler,
            engines,
            alias_name,
            Declaration::TypeAliasDeclaration(*decl_id),
        )
    }
}

impl TypeCheckFinalization for TyTypeAliasDecl {
    /// Type aliases require no finalization work.
    fn type_check_finalize(
        &mut self,
        _handler: &Handler,
        _ctx: &mut TypeCheckFinalizationContext,
    ) -> Result<(), ErrorEmitted> {
        Ok(())
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/enum.rs
sway-core/src/semantic_analysis/ast_node/declaration/enum.rs
use crate::{
    decl_engine::parsed_id::ParsedDeclId,
    language::{parsed::*, ty, CallPath},
    semantic_analysis::*,
    type_system::*,
    Engines,
};
use ast_elements::type_parameter::GenericTypeParameter;
use sway_error::handler::{ErrorEmitted, Handler};
use sway_types::Spanned;
use symbol_collection_context::SymbolCollectionContext;

impl ty::TyEnumDecl {
    /// Symbol-collection pass for an enum: registers the enum's name in the
    /// enclosing scope and opens a (currently empty) child scope for the
    /// declaration.
    pub(crate) fn collect(
        handler: &Handler,
        engines: &Engines,
        ctx: &mut SymbolCollectionContext,
        decl_id: &ParsedDeclId<EnumDeclaration>,
    ) -> Result<(), ErrorEmitted> {
        let enum_decl = engines.pe().get_enum(decl_id);
        let decl = Declaration::EnumDeclaration(*decl_id);
        ctx.insert_parsed_symbol(handler, engines, enum_decl.name.clone(), decl.clone())?;
        // create a namespace for the decl, used to create a scope for generics
        let _ = ctx.scoped(engines, enum_decl.span.clone(), Some(decl), |mut _ctx| {
            Ok(())
        });
        Ok(())
    }

    /// Type checks an enum declaration: its generic type parameters and each
    /// of its variants. Variants that fail to type check are skipped (their
    /// errors have already been emitted) so the remaining ones are still
    /// checked.
    pub fn type_check(
        handler: &Handler,
        mut ctx: TypeCheckContext,
        decl: EnumDeclaration,
    ) -> Result<Self, ErrorEmitted> {
        let EnumDeclaration {
            name,
            type_parameters,
            variants,
            span,
            attributes,
            visibility,
            ..
        } = decl;

        // create a namespace for the decl, used to create a scope for generics
        ctx.scoped(handler, Some(span.clone()), |ctx| {
            // Type check the type parameters.
            let new_type_parameters = GenericTypeParameter::type_check_type_params(
                handler,
                ctx.by_ref(),
                type_parameters,
                None,
            )?;

            // type check the variants
            let mut variants_buf = vec![];
            for variant in variants {
                variants_buf.push(
                    match ty::TyEnumVariant::type_check(handler, ctx.by_ref(), variant.clone()) {
                        Ok(res) => res,
                        // Skip the broken variant; errors already live in `handler`.
                        Err(_) => continue,
                    },
                );
            }

            // Fully qualify the enum's name relative to the current namespace.
            let call_path = CallPath::ident_to_fullpath(name, ctx.namespace());

            // create the enum decl
            let decl = ty::TyEnumDecl {
                call_path,
                generic_parameters: new_type_parameters,
                variants: variants_buf,
                span,
                attributes,
                visibility,
            };
            Ok(decl)
        })
    }
}

impl ty::TyEnumVariant {
    /// Type checks a single enum variant by resolving the type of its payload.
    pub(crate) fn type_check(
        handler: &Handler,
        ctx: TypeCheckContext,
        variant: EnumVariant,
    ) -> Result<Self, ErrorEmitted> {
        let type_engine = ctx.engines.te();
        let mut type_argument = variant.type_argument;
        type_argument.type_id = ctx
            .resolve_type(
                handler,
                type_argument.type_id,
                &type_argument.span(),
                EnforceTypeArguments::Yes,
                None,
            )
            // On resolution failure, substitute the error-recovery type so
            // type checking can continue.
            .unwrap_or_else(|err| type_engine.id_of_error_recovery(err));
        Ok(ty::TyEnumVariant {
            name: variant.name.clone(),
            type_argument,
            tag: variant.tag,
            span: variant.span,
            attributes: variant.attributes,
        })
    }
}

impl TypeCheckAnalysis for ty::TyEnumDecl {
    /// Enum declarations require no analysis work.
    fn type_check_analyze(
        &self,
        _handler: &Handler,
        _ctx: &mut TypeCheckAnalysisContext,
    ) -> Result<(), ErrorEmitted> {
        Ok(())
    }
}

impl TypeCheckFinalization for ty::TyEnumDecl {
    /// Enum declarations require no finalization work.
    fn type_check_finalize(
        &mut self,
        _handler: &Handler,
        _ctx: &mut TypeCheckFinalizationContext,
    ) -> Result<(), ErrorEmitted> {
        Ok(())
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/trait_type.rs
sway-core/src/semantic_analysis/ast_node/declaration/trait_type.rs
use sway_error::{
    error::CompileError,
    handler::{ErrorEmitted, Handler},
};
use sway_types::{Span, Spanned};

use crate::{
    decl_engine::parsed_id::ParsedDeclId,
    language::{
        parsed::{self, Declaration, TraitTypeDeclaration},
        ty::{self, TyTraitType},
    },
    semantic_analysis::{
        symbol_collection_context::SymbolCollectionContext, TypeCheckAnalysis,
        TypeCheckAnalysisContext, TypeCheckContext,
    },
    EnforceTypeArguments, Engines,
};

impl ty::TyTraitType {
    /// Symbol-collection pass for an associated trait type: registers its
    /// name in the current scope.
    pub(crate) fn collect(
        handler: &Handler,
        engines: &Engines,
        ctx: &mut SymbolCollectionContext,
        decl_id: &ParsedDeclId<TraitTypeDeclaration>,
    ) -> Result<(), ErrorEmitted> {
        let trait_type_decl = engines.pe().get_trait_type(decl_id);
        ctx.insert_parsed_symbol(
            handler,
            engines,
            trait_type_decl.name.clone(),
            Declaration::TraitTypeDeclaration(*decl_id),
        )
    }

    /// Type checks an associated trait type declaration, resolving its
    /// optional initializer type. Requires the context to carry a `Self`
    /// type; otherwise an internal compiler error is emitted.
    pub(crate) fn type_check(
        handler: &Handler,
        ctx: TypeCheckContext,
        trait_type: parsed::TraitTypeDeclaration,
    ) -> Result<Self, ErrorEmitted> {
        let parsed::TraitTypeDeclaration {
            name,
            attributes,
            ty_opt,
            span,
        } = trait_type;

        let engines = ctx.engines();
        let type_engine = engines.te();

        // Resolve the initializer type, if one was provided.
        let ty = if let Some(mut ty) = ty_opt {
            *ty.type_id_mut() = ctx
                .resolve_type(
                    handler,
                    ty.type_id(),
                    &ty.span(),
                    EnforceTypeArguments::No,
                    None,
                )
                // On resolution failure, substitute the error-recovery type.
                .unwrap_or_else(|err| type_engine.id_of_error_recovery(err));
            Some(ty)
        } else {
            None
        };

        if let Some(implementing_type) = ctx.self_type() {
            Ok(ty::TyTraitType {
                name,
                attributes,
                ty,
                implementing_type,
                span,
            })
        } else {
            // A trait type only makes sense relative to an implementing type.
            Err(handler.emit_err(CompileError::Internal("Self type not provided.", span)))
        }
    }

    /// Used to create a stubbed out constant when the constant fails to
    /// compile, preventing cascading namespace errors.
    pub(crate) fn error(engines: &Engines, decl: parsed::TraitTypeDeclaration) -> TyTraitType {
        let parsed::TraitTypeDeclaration {
            name,
            attributes,
            ty_opt,
            span,
        } = decl;
        TyTraitType {
            name,
            attributes,
            // Carry the unresolved type through; it was never type checked.
            ty: ty_opt,
            implementing_type: engines.te().new_self_type(engines, Span::dummy()),
            span,
        }
    }
}

impl TypeCheckAnalysis for ty::TyTraitType {
    /// Trait types require no analysis work.
    fn type_check_analyze(
        &self,
        _handler: &Handler,
        _ctx: &mut TypeCheckAnalysisContext,
    ) -> Result<(), ErrorEmitted> {
        Ok(())
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/auto_impl/debug.rs
sway-core/src/semantic_analysis/ast_node/declaration/auto_impl/debug.rs
use sway_error::handler::Handler;
use sway_types::{BaseIdent, Named, Spanned};

use crate::{
    decl_engine::DeclEngineGet,
    language::ty::{self, TyAstNode, TyDecl, TyEnumDecl, TyStructDecl},
    Engines, TypeParameter,
};

#[derive(Default)]
pub struct DebugAutoImplInfo {}

/// Auto-impl context specialized for generating `Debug` implementations.
pub type DebugAutoImplContext<'a, 'b> = super::AutoImplContext<'a, 'b, DebugAutoImplInfo>;

impl<'a, 'b> DebugAutoImplContext<'a, 'b>
where
    'a: 'b,
{
    /// Auto-generates a `Debug` impl for the given declaration, if it is a
    /// struct or an enum; returns `None` for any other declaration kind.
    pub fn generate_debug_impl(
        &mut self,
        engines: &Engines,
        decl: &ty::TyDecl,
    ) -> Option<TyAstNode> {
        match decl {
            TyDecl::StructDecl(_) => self.auto_impl_debug_struct(engines, decl),
            TyDecl::EnumDecl(_) => self.auto_impl_debug_enum(engines, decl),
            _ => None,
        }
    }

    // checks if the current module is a dependency of the `debug` module.
    fn is_debug_dependency(&self) -> bool {
        // Dependencies of the debug library in std cannot have debug implemented for them.
        self.ctx.namespace.current_package_name().as_str() == "std"
            && matches!(
                self.ctx.namespace.current_module().name().as_str(),
                "codec"
                    | "raw_slice"
                    | "raw_ptr"
                    | "ops"
                    | "primitives"
                    | "registers"
                    | "flags"
                    | "debug"
            )
    }

    // Auto implements Debug for structs and returns their `AstNode`s.
    fn auto_impl_debug_struct(&mut self, engines: &Engines, decl: &TyDecl) -> Option<TyAstNode> {
        if self.is_debug_dependency() {
            return None;
        }

        let implementing_for_decl_id = decl.to_struct_decl(&Handler::default(), engines).unwrap();
        let struct_decl = self.ctx.engines().de().get(&implementing_for_decl_id);

        // Generate Sway source for the impl, then parse and type check it.
        let body = self.generate_fmt_struct_body(engines, &struct_decl);
        let code =
            self.generate_fmt_code(struct_decl.name(), &struct_decl.generic_parameters, body);
        let node = self.parse_impl_trait_to_ty_ast_node(
            engines,
            struct_decl.span().source_id(),
            &code,
            crate::build_config::DbgGeneration::None,
        );

        node.ok()
    }

    /// Renders the full `impl Debug for <name>` source, wrapping `body` in a
    /// generated `fmt` method.
    fn generate_fmt_code(
        &self,
        name: &BaseIdent,
        type_parameters: &[TypeParameter],
        body: String,
    ) -> String {
        let type_parameters_declaration_expanded =
            self.generate_type_parameters_declaration_code(type_parameters, true);
        let type_parameters_declaration =
            self.generate_type_parameters_declaration_code(type_parameters, false);
        let type_parameters_constraints =
            self.generate_type_parameters_constraints_code(type_parameters, Some("Debug"));

        let name = name.as_raw_ident_str();

        format!("#[allow(dead_code, deprecated)] impl{type_parameters_declaration_expanded} Debug for {name}{type_parameters_declaration}{type_parameters_constraints} {{ #[allow(dead_code, deprecated)] fn fmt(self, ref mut _f: Formatter) {{ {body} }} }}")
    }

    /// Renders the `fmt` body for a struct: one `.field(...)` call per field.
    fn generate_fmt_struct_body(&self, _engines: &Engines, decl: &TyStructDecl) -> String {
        let mut fields = String::new();

        for field in decl.fields.iter() {
            fields.push_str(&format!(
                ".field(\"{field_name}\", self.{field_name})\n",
                field_name = field.name.as_raw_ident_str(),
            ));
        }

        format!(
            "_f.debug_struct(\"{}\"){fields}.finish();",
            decl.name().as_raw_ident_str()
        )
    }

    // Auto implements Debug for enums and returns their `AstNode`s.
    fn auto_impl_debug_enum(&mut self, engines: &Engines, decl: &TyDecl) -> Option<TyAstNode> {
        if self.is_debug_dependency() {
            return None;
        }

        let enum_decl_id = decl.to_enum_id(&Handler::default(), engines).unwrap();
        let enum_decl = self.ctx.engines().de().get(&enum_decl_id);

        // Generate Sway source for the impl, then parse and type check it.
        let body = self.generate_fmt_enum_body(engines, &enum_decl);
        let code = self.generate_fmt_code(enum_decl.name(), &enum_decl.generic_parameters, body);
        let node = self.parse_impl_trait_to_ty_ast_node(
            engines,
            enum_decl.span().source_id(),
            &code,
            crate::build_config::DbgGeneration::None,
        );

        node.ok()
    }

    /// Renders the `fmt` body for an enum: a `match` with one arm per
    /// variant; unit variants print only the variant name.
    fn generate_fmt_enum_body(&self, engines: &Engines, decl: &TyEnumDecl) -> String {
        let enum_name = decl.call_path.suffix.as_raw_ident_str();

        let arms = decl
            .variants
            .iter()
            .map(|variant| {
                let variant_name = variant.name.as_raw_ident_str();
                if engines.te().get(variant.type_argument.type_id).is_unit() {
                    format!(
                        "{enum_name}::{variant_name} => {{ _f.print_str(\"{variant_name}\"); }}, \n"
                    )
                } else {
                    format!(
                        "{enum_name}::{variant_name}(value) => {{ _f.debug_tuple(\"{enum_name}\").field(value).finish(); }}, \n",
                    )
                }
            })
            .collect::<String>();

        format!("match self {{ {arms} }};")
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/auto_impl/mod.rs
sway-core/src/semantic_analysis/ast_node/declaration/auto_impl/mod.rs
//! This module contains common infrastructure for generating and parsing auto-generated code.

pub mod abi_encoding;
pub mod debug;
pub mod marker_traits;

use std::ops::Deref;

use crate::{
    ast_elements::type_argument::GenericTypeArgument,
    build_config::DbgGeneration,
    engine_threading::SpannedWithEngines,
    language::{
        parsed::{self, AstNodeContent, Declaration, FunctionDeclarationKind},
        ty::{self, TyAstNode, TyDecl},
    },
    semantic_analysis::TypeCheckContext,
    BuildTarget, Engines, TypeInfo, TypeParameter,
};
use sway_error::handler::Handler;
use sway_parse::Parse;
use sway_types::SourceId;

/// Contains all information needed to auto-implement code for a certain feature.
pub struct AutoImplContext<'a, 'b, I>
where
    'a: 'b,
{
    ctx: &'b mut TypeCheckContext<'a>,
    /// Additional information, aside from `ctx`, needed to auto-implement a concrete feature.
    #[allow(dead_code)]
    info: I,
}

impl<'a, 'b, I> AutoImplContext<'a, 'b, I>
where
    'a: 'b,
{
    /// Wraps the given type-check context with default feature info.
    pub fn new(ctx: &'b mut TypeCheckContext<'a>) -> Self
    where
        I: Default,
    {
        Self {
            ctx,
            info: I::default(),
        }
    }

    /// Parses `input` into the expected [Parse] type.
    /// The resulted [Parse] has source id set to autogenerated source id
    /// within the program represented by the `program_id`.
    ///
    /// Panics (via asserts) if the auto-generated `src` fails to lex or
    /// parse — that indicates a bug in the generator, not user error.
    fn parse<T>(&self, engines: &Engines, original_source_id: Option<&SourceId>, src: &str) -> T
    where
        T: Parse,
    {
        // Uncomment this to see what is being generated
        // println!("{}", src);
        let handler = <_>::default();
        // Attribute the generated code to an autogenerated source id derived
        // from the original source, when one exists.
        let autogenerated_source_id = original_source_id.as_ref().and_then(|source_id| {
            engines
                .se()
                .get_associated_autogenerated_source_id(source_id)
        });
        let token_stream =
            sway_parse::lex(&handler, src.into(), 0, src.len(), autogenerated_source_id).unwrap();
        let mut p = sway_parse::Parser::new(&handler, &token_stream, self.ctx.experimental);
        // Generated identifiers may use double underscores; allow them here.
        p.check_double_underscore = false;

        let r = p.parse();

        assert!(!handler.has_errors(), "{handler:?}");
        assert!(!handler.has_warnings(), "{handler:?}");

        assert!(!p.has_errors());
        assert!(!p.has_warnings());

        r.unwrap()
    }

    /// Generates code like:
    /// if expanded_const_generics == false, `<A, B, u64, N>`.
    /// if expanded_const_generics == true, `<A, B, u64, const N: u64>`.
    fn generate_type_parameters_declaration_code(
        &self,
        type_parameters: &[TypeParameter],
        expanded_const_generics: bool,
    ) -> String {
        let mut code = String::new();
        code.push('<');
        for p in type_parameters {
            match p {
                TypeParameter::Type(p) => code.push_str(p.name.as_str()),
                TypeParameter::Const(p) => {
                    if expanded_const_generics {
                        code.push_str(&format!("const {}: u64", p.name.as_str()));
                    } else {
                        code.push_str(p.name.as_str())
                    }
                }
            }
            code.push_str(", ");
        }
        code.push('>');
        code
    }

    /// Generates code like: ` where T: Eq + Hash + <extra_constraint>,\n`.
    fn generate_type_parameters_constraints_code(
        &self,
        type_parameters: &[TypeParameter],
        extra_constraint: Option<&str>,
    ) -> String {
        let mut code = String::new();
        for p in type_parameters.iter() {
            // Const generics carry no trait constraints; skip them.
            let TypeParameter::Type(p) = p else {
                continue;
            };
            if !p.trait_constraints.is_empty() || extra_constraint.is_some() {
                code.push_str(&format!(
                    "{}: {},\n",
                    p.name.as_str(),
                    // Join the extra constraint (first) and the declared trait
                    // constraints with " + ".
                    itertools::intersperse(
                        extra_constraint
                            .map_or(vec![], |extra_constraint| vec![extra_constraint])
                            .into_iter()
                            .chain(
                                p.trait_constraints
                                    .iter()
                                    .map(|x| x.trait_name.suffix.as_str())
                            ),
                        " + "
                    )
                    .collect::<String>()
                ));
            }
        }

        if !code.is_empty() {
            code = format!(" where {code}\n");
        }
        code
    }

    /// Parses `code` that contains [Declaration::FunctionDeclaration] into the
    /// corresponding [TyAstNode].
    ///
    /// Returns the handler (with its accumulated errors) on failure so the
    /// caller can inspect why generation did not succeed.
    pub fn parse_fn_to_ty_ast_node(
        &mut self,
        engines: &Engines,
        original_source_id: Option<&SourceId>,
        kind: FunctionDeclarationKind,
        code: &str,
        dbg_generation: DbgGeneration,
    ) -> Result<TyAstNode, Handler> {
        let mut ctx = crate::transform::to_parsed_lang::Context::new(
            crate::BuildTarget::Fuel,
            dbg_generation,
            self.ctx.experimental,
            "", // this is only used for self impl contracts
        );

        let handler = Handler::default();
        let item = self.parse(engines, original_source_id, code);
        let nodes = crate::transform::to_parsed_lang::item_to_ast_nodes(
            &mut ctx, &handler, engines, item, false, Some(kind),
        )
        .unwrap();

        let decl = match nodes[0].content {
            AstNodeContent::Declaration(Declaration::FunctionDeclaration(f)) => f,
            _ => unreachable!("unexpected node; expected `Declaration::FunctionDeclaration`"),
        };

        if handler.has_errors() {
            // Errors here mean the generated code itself is malformed.
            panic!(
                "{:?} {:?}",
                handler,
                original_source_id.map(|x| engines.se().get_file_name(x))
            );
        }
        assert!(!handler.has_warnings(), "{handler:?}");

        let mut ctx = self.ctx.by_ref();
        let _r = TyDecl::collect(
            &handler,
            engines,
            ctx.collection_ctx,
            Declaration::FunctionDeclaration(decl),
        );
        if handler.has_errors() {
            return Err(handler);
        }

        let r = ctx.scoped(&handler, None, |ctx| {
            TyDecl::type_check(
                &handler,
                &mut ctx.by_ref(),
                parsed::Declaration::FunctionDeclaration(decl),
            )
        });

        // Uncomment this to understand why an entry function was not generated
        // println!("{}, {:#?}", r.is_ok(), handler);

        let decl = r.map_err(|_| handler.clone())?;

        if handler.has_errors() || matches!(decl, TyDecl::ErrorRecovery(_, _)) {
            Err(handler)
        } else {
            Ok(TyAstNode {
                span: decl.span(engines),
                content: ty::TyAstNodeContent::Declaration(decl),
            })
        }
    }

    /// Parses `code` that contains [Declaration::ImplSelfOrTrait] into the
    /// corresponding [TyAstNode].
    ///
    /// On success, also registers the new trait implementation in the
    /// current scope so subsequent type checking can see it.
    fn parse_impl_trait_to_ty_ast_node(
        &mut self,
        engines: &Engines,
        original_source_id: Option<&SourceId>,
        code: &str,
        dbg_generation: DbgGeneration,
    ) -> Result<TyAstNode, Handler> {
        let mut ctx = crate::transform::to_parsed_lang::Context::new(
            BuildTarget::Fuel,
            dbg_generation,
            self.ctx.experimental,
            "", // this is only used for self impl contracts
        );

        let handler = Handler::default();
        let item = self.parse(engines, original_source_id, code);
        let nodes = crate::transform::to_parsed_lang::item_to_ast_nodes(
            &mut ctx, &handler, engines, item, false, None,
        )
        .unwrap();

        let decl = match nodes[0].content {
            AstNodeContent::Declaration(Declaration::ImplSelfOrTrait(f)) => f,
            _ => unreachable!("unexpected node; expected `Declaration::ImplSelfOrTrait`"),
        };

        if handler.has_errors() {
            return Err(handler);
        }

        let mut ctx = self.ctx.by_ref();
        let _r = TyDecl::collect(
            &handler,
            engines,
            ctx.collection_ctx,
            Declaration::ImplSelfOrTrait(decl),
        );
        if handler.has_errors() {
            return Err(handler);
        }

        let r = ctx.scoped(&handler, None, |ctx| {
            TyDecl::type_check(&handler, ctx, Declaration::ImplSelfOrTrait(decl))
        });

        // Uncomment this to understand why auto impl failed for a type.
        // println!("{:#?}", handler);

        let decl = r.map_err(|_| handler.clone())?;

        if handler.has_errors() || matches!(decl, TyDecl::ErrorRecovery(_, _)) {
            Err(handler)
        } else {
            let impl_trait = if let TyDecl::ImplSelfOrTrait(impl_trait_id) = &decl {
                engines.de().get_impl_self_or_trait(&impl_trait_id.decl_id)
            } else {
                unreachable!();
            };
            // Insert trait implementation generated in the previous scope into the current scope.
            ctx.insert_trait_implementation(
                &handler,
                impl_trait.trait_name.clone(),
                impl_trait.trait_type_arguments.clone(),
                impl_trait.implementing_for.type_id,
                impl_trait.impl_type_parameters.clone(),
                &impl_trait.items,
                &impl_trait.span,
                impl_trait
                    .trait_decl_ref
                    .as_ref()
                    .map(|decl_ref| decl_ref.decl_span().clone()),
                crate::namespace::IsImplSelf::No,
                crate::namespace::IsExtendingExistingImpl::No,
                crate::namespace::IsImplInterfaceSurface::No,
            )
            // Best effort: insertion failure is not fatal here.
            .ok();
            Ok(TyAstNode {
                span: decl.span(engines),
                content: ty::TyAstNodeContent::Declaration(decl),
            })
        }
    }

    /// Returns the string representation of the type given by `ta`, as given in code
    /// by the `ta`'s span.
    ///
    /// The safest way would be to return a canonical fully qualified type path.
    /// We do not have a way to do this at the moment, so the best way is to use
    /// exactly what was typed by the user, to accommodate aliased imports.
    fn generate_type(engines: &Engines, ta: &GenericTypeArgument) -> String {
        match &*engines.te().get(ta.type_id) {
            // A special case for function return type.
            // When a function does not define a return type, the span points to the whole signature.
            TypeInfo::Tuple(v) if v.is_empty() => "()".into(),
            // Otherwise, take the type from the span.
            _ => ta.span.as_str().to_string(),
        }
    }
}

impl<'a, 'b, I> Deref for AutoImplContext<'a, 'b, I>
where
    'a: 'b,
{
    type Target = TypeCheckContext<'a>;

    fn deref(&self) -> &Self::Target {
        self.ctx
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/auto_impl/marker_traits.rs
sway-core/src/semantic_analysis/ast_node/declaration/auto_impl/marker_traits.rs
use crate::{
    language::ty::{self, TyAstNode},
    Engines,
};
use sway_types::{Named, Spanned};

#[derive(Default)]
pub struct MarkerTraitsAutoImplInfo {}

/// Auto-impl context specialized for generating marker-trait implementations.
pub type MarkerTraitsAutoImplContext<'a, 'b> =
    super::AutoImplContext<'a, 'b, MarkerTraitsAutoImplInfo>;

impl<'a, 'b> MarkerTraitsAutoImplContext<'a, 'b>
where
    'a: 'b,
{
    /// Generates an implementation of the `Enum` marker trait for the user defined enum
    /// represented by the `enum_decl`.
    pub fn generate_enum_marker_trait_impl(
        &mut self,
        engines: &Engines,
        enum_decl: &ty::TyEnumDecl,
    ) -> Option<TyAstNode> {
        self.auto_impl_empty_marker_trait_on_enum(engines, enum_decl, "Enum", None)
    }

    /// Generates an implementation of the `Error` marker trait for the user defined enum
    /// represented by the `enum_decl`.
    pub fn generate_error_type_marker_trait_impl_for_enum(
        &mut self,
        engines: &Engines,
        enum_decl: &ty::TyEnumDecl,
    ) -> Option<TyAstNode> {
        self.auto_impl_empty_marker_trait_on_enum(engines, enum_decl, "Error", Some("AbiEncode"))
    }

    /// Generates an empty `impl <marker_trait_name> for <enum>` and type
    /// checks it, optionally adding `extra_constraint` to every generic
    /// type parameter. Returns `None` inside std's marker module itself
    /// or if the generated impl fails to type check.
    fn auto_impl_empty_marker_trait_on_enum(
        &mut self,
        engines: &Engines,
        enum_decl: &ty::TyEnumDecl,
        marker_trait_name: &str,
        extra_constraint: Option<&str>,
    ) -> Option<TyAstNode> {
        if self.ctx.namespace.current_module().is_std_marker_module() {
            return None;
        }

        let type_parameters_declaration_expanded =
            self.generate_type_parameters_declaration_code(&enum_decl.generic_parameters, true);
        let type_parameters_declaration =
            self.generate_type_parameters_declaration_code(&enum_decl.generic_parameters, false);
        let type_parameters_constraints = self.generate_type_parameters_constraints_code(
            &enum_decl.generic_parameters,
            extra_constraint,
        );

        let impl_marker_trait_code = format!(
            "#[allow(dead_code, deprecated)] impl{type_parameters_declaration_expanded} {marker_trait_name} for {}{type_parameters_declaration}{type_parameters_constraints} {{ }}",
            enum_decl.name().as_raw_ident_str()
        );

        let impl_enum_node = self.parse_impl_trait_to_ty_ast_node(
            engines,
            enum_decl.span().source_id(),
            &impl_marker_trait_code,
            crate::build_config::DbgGeneration::None,
        );

        impl_enum_node.ok()
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/auto_impl/abi_encoding.rs
sway-core/src/semantic_analysis/ast_node/declaration/auto_impl/abi_encoding.rs
use crate::{ asm_generation::fuel::compiler_constants::MISMATCHED_SELECTOR_REVERT_CODE, decl_engine::{DeclEngineGet, DeclId}, language::{ parsed::FunctionDeclarationKind, ty::{self, TyAstNode, TyDecl, TyEnumDecl, TyFunctionDecl, TyStructDecl}, Purity, }, Engines, TypeInfo, TypeParameter, }; use itertools::Itertools; use std::collections::BTreeMap; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::{BaseIdent, Named, SourceId, Span, Spanned}; #[derive(Default)] pub struct AbiEncodingAutoImplInfo {} pub type AbiEncodingAutoImplContext<'a, 'b> = super::AutoImplContext<'a, 'b, AbiEncodingAutoImplInfo>; impl<'a, 'b> AbiEncodingAutoImplContext<'a, 'b> where 'a: 'b, { fn generate_abi_encode_code( &self, name: &BaseIdent, type_parameters: &[TypeParameter], body: String, is_trivial_body: &str, ) -> String { let type_parameters_declaration_expanded = self.generate_type_parameters_declaration_code(type_parameters, true); let type_parameters_declaration = self.generate_type_parameters_declaration_code(type_parameters, false); let type_parameters_constraints = self.generate_type_parameters_constraints_code(type_parameters, Some("AbiEncode")); let name = name.as_raw_ident_str(); format!("#[allow(dead_code, deprecated)] impl{type_parameters_declaration_expanded} AbiEncode for {name}{type_parameters_declaration}{type_parameters_constraints} {{ #[allow(dead_code, deprecated)] fn is_encode_trivial() -> bool {{ {is_trivial_body} }} #[allow(dead_code, deprecated)] fn abi_encode(self, buffer: Buffer) -> Buffer {{ {body} buffer }} }}") } fn generate_abi_decode_code( &self, name: &BaseIdent, type_parameters: &[TypeParameter], body: String, is_trivial_body: &str, ) -> String { let type_parameters_declaration_expanded = self.generate_type_parameters_declaration_code(type_parameters, true); let type_parameters_declaration = self.generate_type_parameters_declaration_code(type_parameters, false); let type_parameters_constraints = 
self.generate_type_parameters_constraints_code(type_parameters, Some("AbiDecode")); let name = name.as_raw_ident_str(); let buffer_arg = if body == "Self { }" { "_buffer" } else { "buffer" }; format!("#[allow(dead_code, deprecated)] impl{type_parameters_declaration_expanded} AbiDecode for {name}{type_parameters_declaration}{type_parameters_constraints} {{ #[allow(dead_code, deprecated)] fn is_decode_trivial() -> bool {{ {is_trivial_body} }} #[allow(dead_code, deprecated)] fn abi_decode(ref mut {buffer_arg}: BufferReader) -> Self {{ {body} }} }}") } fn generate_abi_encode_struct_body(&self, _engines: &Engines, decl: &TyStructDecl) -> String { let mut code = String::new(); for f in decl.fields.iter() { code.push_str(&format!( "let buffer = self.{field_name}.abi_encode(buffer);\n", field_name = f.name.as_raw_ident_str(), )); } code } fn generate_abi_decode_struct_body( &self, engines: &Engines, decl: &TyStructDecl, ) -> Option<String> { let mut code = String::new(); for f in decl.fields.iter() { code.push_str(&format!( "{field_name}: buffer.decode::<{field_type_name}>(),", field_name = f.name.as_raw_ident_str(), field_type_name = Self::generate_type(engines, &f.type_argument), )); } Some(format!("Self {{ {code} }}")) } fn generate_abi_decode_enum_body( &self, engines: &Engines, decl: &TyEnumDecl, ) -> Option<String> { let enum_name = decl.call_path.suffix.as_raw_ident_str(); let arms = decl.variants.iter() .map(|x| { let name = x.name.as_raw_ident_str(); Some(match &*engines.te().get(x.type_argument.type_id) { // unit TypeInfo::Tuple(fields) if fields.is_empty() => { format!("{} => {}::{}, \n", x.tag, enum_name, name) }, _ => { let variant_type_name = Self::generate_type(engines, &x.type_argument); format!("{tag_value} => {enum_name}::{variant_name}(buffer.decode::<{variant_type}>()), \n", tag_value = x.tag, enum_name = enum_name, variant_name = name, variant_type = variant_type_name ) } }) }) .collect::<Option<String>>()?; use std::fmt::Write; let mut code = 
String::new(); let _ = writeln!(&mut code, "let variant: u64 = buffer.decode::<u64>();"); let _ = writeln!(&mut code, "match variant {{ {arms} _ => __revert(0), }}"); Some(code) } fn generate_abi_encode_enum_body(&self, engines: &Engines, decl: &TyEnumDecl) -> String { if decl.variants.is_empty() { return "".into(); } let enum_name = decl.call_path.suffix.as_raw_ident_str(); let arms = decl .variants .iter() .map(|x| { let name = x.name.as_raw_ident_str(); if engines.te().get(x.type_argument.type_id).is_unit() { format!( "{enum_name}::{variant_name} => {{ {tag_value}u64.abi_encode(buffer) }}, \n", tag_value = x.tag, enum_name = enum_name, variant_name = name ) } else { format!( "{enum_name}::{variant_name}(value) => {{ let buffer = {tag_value}u64.abi_encode(buffer); let buffer = value.abi_encode(buffer); buffer }}, \n", tag_value = x.tag, enum_name = enum_name, variant_name = name, ) } }) .collect::<String>(); format!("let buffer = match self {{ {arms} }};") } // Auto implements AbiEncode and AbiDecode for structs and returns their `AstNode`s. fn auto_impl_abi_encode_and_decode_for_struct( &mut self, engines: &Engines, decl: &TyDecl, ) -> Option<(Option<TyAstNode>, Option<TyAstNode>)> { // Dependencies of the codec library in std cannot have abi encoding implemented for them. 
if self.ctx.namespace.current_package_name().as_str() == "std" && matches!( self.ctx.namespace.current_module().name().as_str(), "codec" | "raw_slice" | "raw_ptr" | "ops" | "primitives" | "registers" | "flags" ) { return Some((None, None)); } let implementing_for_decl_id = decl.to_struct_decl(&Handler::default(), engines).unwrap(); let struct_decl = self.ctx.engines().de().get(&implementing_for_decl_id); let fields_types = struct_decl .fields .iter() .map(|x| Self::generate_type(engines, &x.type_argument)); let mut is_encode_trivial = "__runtime_mem_id::<Self>() == __encoding_mem_id::<Self>()".to_string(); for field_type in fields_types { is_encode_trivial.push_str(" && "); is_encode_trivial.push_str(&format!("is_encode_trivial::<{}>()", field_type)); } let abi_encode_body = self.generate_abi_encode_struct_body(engines, &struct_decl); let abi_encode_code = self.generate_abi_encode_code( struct_decl.name(), &struct_decl.generic_parameters, abi_encode_body, &is_encode_trivial, ); let abi_encode_node = self.parse_impl_trait_to_ty_ast_node( engines, struct_decl.span().source_id(), &abi_encode_code, crate::build_config::DbgGeneration::None, ); let fields_types = struct_decl .fields .iter() .map(|x| Self::generate_type(engines, &x.type_argument)); let mut is_decode_trivial = "__runtime_mem_id::<Self>() == __encoding_mem_id::<Self>()".to_string(); for field_type in fields_types { is_decode_trivial.push_str(" && "); is_decode_trivial.push_str(&format!("is_decode_trivial::<{}>()", field_type)); } let abi_decode_body = self.generate_abi_decode_struct_body(engines, &struct_decl); let abi_decode_code = self.generate_abi_decode_code( struct_decl.name(), &struct_decl.generic_parameters, abi_decode_body?, &is_decode_trivial, ); let abi_decode_node = self.parse_impl_trait_to_ty_ast_node( engines, struct_decl.span().source_id(), &abi_decode_code, crate::build_config::DbgGeneration::None, ); Some((abi_encode_node.ok(), abi_decode_node.ok())) } fn 
auto_impl_abi_encode_and_decode_for_enum( &mut self, engines: &Engines, decl: &TyDecl, ) -> Option<(Option<TyAstNode>, Option<TyAstNode>)> { // Dependencies of the codec library in std cannot have abi encoding implemented for them. if self.ctx.namespace.current_package_name().as_str() == "std" && matches!( self.ctx.namespace.current_module().name().as_str(), "codec" | "raw_slice" | "raw_ptr" | "ops" | "primitives" | "registers" | "flags" ) { return Some((None, None)); } let enum_decl_id = decl.to_enum_id(&Handler::default(), engines).unwrap(); let enum_decl = self.ctx.engines().de().get(&enum_decl_id); let variant_types = enum_decl .variants .iter() .map(|x| Self::generate_type(engines, &x.type_argument)); let mut is_encode_trivial = "__runtime_mem_id::<Self>() == __encoding_mem_id::<Self>()".to_string(); for variant_type in variant_types { is_encode_trivial.push_str(" && "); is_encode_trivial.push_str(&format!("is_encode_trivial::<{}>()", variant_type)); } let abi_encode_body = self.generate_abi_encode_enum_body(engines, &enum_decl); let abi_encode_code = self.generate_abi_encode_code( enum_decl.name(), &enum_decl.generic_parameters, abi_encode_body, &is_encode_trivial, ); let abi_encode_node = self.parse_impl_trait_to_ty_ast_node( engines, enum_decl.span().source_id(), &abi_encode_code, crate::build_config::DbgGeneration::None, ); let abi_decode_body = self.generate_abi_decode_enum_body(engines, &enum_decl); let abi_decode_code = self.generate_abi_decode_code( enum_decl.name(), &enum_decl.generic_parameters, abi_decode_body?, "false", ); let abi_decode_node = self.parse_impl_trait_to_ty_ast_node( engines, enum_decl.span().source_id(), &abi_decode_code, crate::build_config::DbgGeneration::None, ); Some((abi_encode_node.ok(), abi_decode_node.ok())) } pub fn generate_abi_encode_and_decode_impls( &mut self, engines: &Engines, decl: &ty::TyDecl, ) -> (Option<TyAstNode>, Option<TyAstNode>) { match decl { TyDecl::StructDecl(_) => self 
.auto_impl_abi_encode_and_decode_for_struct(engines, decl) .unwrap_or((None, None)), TyDecl::EnumDecl(_) => self .auto_impl_abi_encode_and_decode_for_enum(engines, decl) .unwrap_or((None, None)), _ => (None, None), } } pub(crate) fn generate_contract_entry( &mut self, engines: &Engines, original_source_id: Option<&SourceId>, contract_fns: &[DeclId<TyFunctionDecl>], fallback_fn: Option<DeclId<TyFunctionDecl>>, handler: &Handler, ) -> Result<TyAstNode, ErrorEmitted> { let mut reads = false; let mut writes = false; // used to check for name collisions let mut contract_methods: BTreeMap<String, Vec<Span>> = <_>::default(); let mut arm_by_size = BTreeMap::<usize, String>::default(); // generate code let mut method_names = String::new(); for r in contract_fns { let decl = engines.de().get(r); // For contract methods, even if their names are raw identifiers, // we use just the name, because the generated methods will be prefixed // with `__contract_entry_`. let name = decl.name.as_str(); if !contract_methods.contains_key(name) { contract_methods.insert(name.to_string(), vec![]); } contract_methods .get_mut(name) .unwrap() .push(decl.name.span()); match decl.purity { Purity::Pure => {} Purity::Reads => reads = true, Purity::Writes => writes = true, Purity::ReadsWrites => { reads = true; writes = true; } } let args_types = decl .parameters .iter() .map(|x| Self::generate_type(engines, &x.type_argument)); let args_types = itertools::intersperse(args_types, ", ".into()).collect::<String>(); let args_types = if args_types.is_empty() { "()".into() } else { format!("({args_types},)") }; let expanded_args = itertools::intersperse( decl.parameters .iter() .enumerate() .map(|(i, _)| format!("args.{i}")), ", ".into(), ) .collect::<String>(); let return_type = Self::generate_type(engines, &decl.return_type); let method_name = decl.name.as_str(); let offset = if let Some(offset) = method_names.find(method_name) { offset } else { let offset = method_names.len(); 
method_names.push_str(method_name); offset }; let method_name_len = method_name.len(); let code = arm_by_size.entry(method_name.len()).or_default(); code.push_str(&format!(" let is_this_method = asm(r, ptr: _method_name_ptr, name: _method_names_ptr, len: {method_name_len}) {{ addi r name i{offset}; meq r ptr r len; r: bool }}; if is_this_method {{\n")); if args_types == "()" { code.push_str(&format!( "let _result = __contract_entry_{method_name}();\n" )); } else { code.push_str(&format!( "let args: {args_types} = decode_from_raw_ptr::<{args_types}>(_buffer_ptr); let _result: {return_type} = __contract_entry_{method_name}({expanded_args});\n" )); } if return_type == "()" { code.push_str("__contract_ret(asm() { zero: raw_ptr }, 0);"); } else { code.push_str(&format!("encode_and_return::<{return_type}>(&_result);")); } code.push_str("\n}\n"); } // check contract methods are unique // we need to allow manual_try_fold to avoid short-circuit and show // all errors. #[allow(clippy::manual_try_fold)] contract_methods .into_iter() .fold(Ok(()), |error, (_, spans)| { if spans.len() > 1 { Err(handler .emit_err(CompileError::MultipleContractsMethodsWithTheSameName { spans })) } else { error } })?; let fallback = if let Some(fallback_fn) = fallback_fn { let fallback_fn = engines.de().get(&fallback_fn); let return_type = Self::generate_type(engines, &fallback_fn.return_type); let method_name = fallback_fn.name.as_raw_ident_str(); match fallback_fn.purity { Purity::Pure => {} Purity::Reads => reads = true, Purity::Writes => writes = true, Purity::ReadsWrites => { reads = true; writes = true; } } format!("let result: raw_slice = encode::<{return_type}>({method_name}()); __contract_ret(result.ptr(), result.len::<u8>());") } else { // as the old encoding does format!("__revert({MISMATCHED_SELECTOR_REVERT_CODE});") }; let att = match (reads, writes) { (true, true) => "#[storage(read, write)]", (true, false) => "#[storage(read)]", (false, true) => "#[storage(write)]", (false, false) 
=> "", }; let code = arm_by_size .iter() .map(|(len, code)| format!("if _method_len == {len} {{ {code} }}")) .join(""); let code = format!( "{att} pub fn __entry() {{ let _method_names = \"{method_names}\"; let _method_names_ptr = _method_names.as_ptr(); let mut _buffer_ptr = BufferReader::from_second_parameter(); let _method_name_ptr = BufferReader::from_first_parameter(); let mut _method_name = BufferReader {{ ptr: _method_name_ptr }}; let _method_len = _method_name.read::<u64>(); let _method_name_ptr = _method_name.ptr(); {code} {fallback} }}" ); let entry_fn = self.parse_fn_to_ty_ast_node( engines, original_source_id, FunctionDeclarationKind::Entry, &code, crate::build_config::DbgGeneration::None, ); match entry_fn { Ok(entry_fn) => Ok(entry_fn), Err(gen_handler) => { Self::check_impl_is_missing(handler, &gen_handler); Self::check_std_is_missing(handler, &gen_handler); Err(gen_handler.emit_err(CompileError::CouldNotGenerateEntry { span: Span::dummy(), })) } } } pub(crate) fn generate_predicate_entry( &mut self, engines: &Engines, decl: &TyFunctionDecl, handler: &Handler, ) -> Result<TyAstNode, ErrorEmitted> { let args_types = decl .parameters .iter() .map(|x| Self::generate_type(engines, &x.type_argument)); let args_types = itertools::intersperse(args_types, ", ".into()).collect::<String>(); let expanded_args = itertools::intersperse( decl.parameters .iter() .enumerate() .map(|(i, _)| format!("args.{i}")), ", ".into(), ) .collect::<String>(); let code = if args_types.is_empty() { "pub fn __entry() -> bool { main() }".to_string() } else { let args_types = format!("({args_types},)"); format!( "pub fn __entry() -> bool {{ let args: {args_types} = decode_predicate_data::<{args_types}>(); main({expanded_args}) }}" ) }; let entry_fn = self.parse_fn_to_ty_ast_node( engines, decl.span.source_id(), FunctionDeclarationKind::Entry, &code, crate::build_config::DbgGeneration::None, ); match entry_fn { Ok(entry_fn) => Ok(entry_fn), Err(gen_handler) => { 
Self::check_impl_is_missing(handler, &gen_handler); Self::check_std_is_missing(handler, &gen_handler); Err(gen_handler.emit_err(CompileError::CouldNotGenerateEntry { span: Span::dummy(), })) } } } // Check std is missing and give a more user-friendly error message. fn check_std_is_missing(handler: &Handler, gen_handler: &Handler) { let encode_not_found = gen_handler .find_error(|x| matches!(x, CompileError::SymbolNotFound { .. })) .is_some(); if encode_not_found { handler.emit_err(CompileError::CouldNotGenerateEntryMissingStd { span: Span::dummy(), }); } } // Check cannot encode or decode type fn check_impl_is_missing(handler: &Handler, gen_handler: &Handler) { let constraint_not_satisfied = gen_handler.find_error(|x| { matches!(x, CompileError::TraitConstraintNotSatisfied { trait_name, .. } if trait_name == "AbiEncode" || trait_name == "AbiDecode" && { true }) }); if let Some(constraint_not_satisfied) = constraint_not_satisfied { let ty = match constraint_not_satisfied { CompileError::TraitConstraintNotSatisfied { ty, .. 
} => ty, _ => unreachable!("unexpected error"), }; handler.emit_err(CompileError::CouldNotGenerateEntryMissingImpl { ty, span: Span::dummy(), }); } } pub(crate) fn generate_script_entry( &mut self, engines: &Engines, decl: &TyFunctionDecl, handler: &Handler, ) -> Result<TyAstNode, ErrorEmitted> { let args_types = decl .parameters .iter() .map(|x| Self::generate_type(engines, &x.type_argument)); let args_types = itertools::intersperse(args_types, ", ".into()).collect::<String>(); let args_types = if args_types.is_empty() { "()".into() } else { format!("({args_types},)") }; let expanded_args = itertools::intersperse( decl.parameters .iter() .enumerate() .map(|(i, _)| format!("args.{i}")), ", ".into(), ) .collect::<String>(); let return_type = Self::generate_type(engines, &decl.return_type); let return_encode = if return_type == "()" { "__contract_ret(0, 0)".to_string() } else { format!("encode_and_return::<{return_type}>(&_result)") }; let code = if args_types == "()" { format!( "pub fn __entry() -> ! {{ let _result: {return_type} = main(); {return_encode} }}" ) } else { format!( "pub fn __entry() -> ! {{ let args: {args_types} = decode_script_data::<{args_types}>(); let _result: {return_type} = main({expanded_args}); {return_encode} }}" ) }; let entry_fn = self.parse_fn_to_ty_ast_node( engines, decl.span.source_id(), FunctionDeclarationKind::Entry, &code, crate::build_config::DbgGeneration::None, ); match entry_fn { Ok(entry_fn) => Ok(entry_fn), Err(gen_handler) => { Self::check_std_is_missing(handler, &gen_handler); Self::check_impl_is_missing(handler, &gen_handler); Err(gen_handler.emit_err(CompileError::CouldNotGenerateEntry { span: Span::dummy(), })) } } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/declaration/function/function_parameter.rs
sway-core/src/semantic_analysis/ast_node/declaration/function/function_parameter.rs
use crate::{ language::{parsed::FunctionParameter, ty}, semantic_analysis::TypeCheckContext, type_system::*, }; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::Spanned; impl ty::TyFunctionParameter { pub(crate) fn type_check( handler: &Handler, ctx: TypeCheckContext, parameter: FunctionParameter, ) -> Result<Self, ErrorEmitted> { let type_engine = ctx.engines.te(); let FunctionParameter { name, is_reference, is_mutable, mutability_span, mut type_argument, } = parameter; type_argument.type_id = ctx .resolve_type( handler, type_argument.type_id, &type_argument.span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); type_argument.type_id.check_type_parameter_bounds( handler, ctx, &type_argument.span(), None, )?; let mutability = ty::VariableMutability::new_from_ref_mut(is_reference, is_mutable); if mutability == ty::VariableMutability::Mutable { return Err( handler.emit_err(CompileError::MutableParameterNotSupported { param_name: name.clone(), span: name.span(), }), ); } let typed_parameter = ty::TyFunctionParameter { name, is_reference, is_mutable, mutability_span, type_argument, }; Ok(typed_parameter) } pub(crate) fn type_check_interface_parameter( handler: &Handler, ctx: TypeCheckContext, parameter: &FunctionParameter, ) -> Result<Self, ErrorEmitted> { let type_engine = ctx.engines.te(); let FunctionParameter { name, is_reference, is_mutable, mutability_span, type_argument, } = parameter; let mut new_type_argument = type_argument.clone(); new_type_argument.type_id = ctx .resolve_type( handler, type_argument.type_id, &type_argument.span, EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); let typed_parameter = ty::TyFunctionParameter { name: name.clone(), is_reference: *is_reference, is_mutable: *is_mutable, mutability_span: mutability_span.clone(), type_argument: new_type_argument, }; Ok(typed_parameter) } pub fn 
insert_into_namespace(&self, handler: &Handler, mut ctx: TypeCheckContext) { let _ = ctx.insert_symbol( handler, self.name.clone(), ty::TyDecl::VariableDecl(Box::new(ty::TyVariableDecl { name: self.name.clone(), body: ty::TyExpression { expression: ty::TyExpressionVariant::FunctionParameter, return_type: self.type_argument.type_id, span: self.name.span(), }, mutability: ty::VariableMutability::new_from_ref_mut( self.is_reference, self.is_mutable, ), return_type: self.type_argument.type_id, type_ascription: self.type_argument.clone(), })), ); } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/intrinsic_function.rs
sway-core/src/semantic_analysis/ast_node/expression/intrinsic_function.rs
use ast_elements::type_argument::GenericTypeArgument; use sway_ast::intrinsics::Intrinsic; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::Span; use sway_types::{integer_bits::IntegerBits, Spanned}; use crate::{ engine_threading::*, language::{ parsed::{Expression, ExpressionKind}, ty::{self, TyIntrinsicFunctionKind}, Literal, }, semantic_analysis::TypeCheckContext, type_system::*, types::TypeMetadata, }; impl ty::TyIntrinsicFunctionKind { pub(crate) fn type_check( handler: &Handler, ctx: TypeCheckContext, kind_binding: TypeBinding<Intrinsic>, arguments: &[Expression], span: Span, ) -> Result<(Self, TypeId), ErrorEmitted> { let TypeBinding { inner: kind, type_arguments, .. } = kind_binding; let type_arguments = type_arguments.as_slice(); match kind { Intrinsic::SizeOfVal => { type_check_size_of_val(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::SizeOfType => { type_check_size_of_type(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::SizeOfStr => { type_check_size_of_type(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::IsReferenceType => { type_check_is_reference_type(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::IsStrArray => { type_check_is_reference_type(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::AssertIsStrArray => { type_check_assert_is_str_array(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::ToStrArray => type_check_to_str_array(handler, ctx, kind, arguments, span), Intrinsic::Eq | Intrinsic::Gt | Intrinsic::Lt => { type_check_cmp(handler, ctx, kind, arguments, span) } Intrinsic::Gtf => type_check_gtf(handler, ctx, kind, arguments, type_arguments, span), Intrinsic::AddrOf => type_check_addr_of(handler, ctx, kind, arguments, span), Intrinsic::StateClear => type_check_state_clear(handler, ctx, kind, arguments, span), Intrinsic::StateLoadWord => { type_check_state_load_word(handler, ctx, kind, arguments, 
span) } Intrinsic::StateStoreWord => { type_check_state_store_word(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::StateLoadQuad | Intrinsic::StateStoreQuad => { type_check_state_quad(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::Log => type_check_log(handler, ctx, kind, arguments, span), Intrinsic::Add | Intrinsic::Sub | Intrinsic::Mul | Intrinsic::Div | Intrinsic::Mod => { type_check_arith_binary_op(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::And | Intrinsic::Or | Intrinsic::Xor => { type_check_bitwise_binary_op(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::Lsh | Intrinsic::Rsh => { type_check_shift_binary_op(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::Revert => { type_check_revert(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::PtrAdd | Intrinsic::PtrSub => { type_check_ptr_ops(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::Smo => type_check_smo(handler, ctx, kind, arguments, type_arguments, span), Intrinsic::Not => type_check_not(handler, ctx, kind, arguments, type_arguments, span), Intrinsic::JmpMem => { type_check_jmp_mem(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::ContractCall => { type_check_contract_call(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::ContractRet => { type_check_contract_ret(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::EncodeBufferEmpty => { type_check_encode_buffer_empty(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::EncodeBufferAppend => { type_check_encode_append(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::EncodeBufferAsRawSlice => { type_check_encode_as_raw_slice(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::Slice => { type_check_slice(handler, ctx, kind, arguments, type_arguments, span) } Intrinsic::ElemAt => type_check_elem_at(arguments, handler, kind, span, ctx), 
Intrinsic::Transmute => { type_check_transmute(arguments, handler, kind, type_arguments, span, ctx) } Intrinsic::Dbg => { unreachable!("__dbg should not exist in the typed tree") } Intrinsic::RuntimeMemoryId => { type_check_runtime_memory_id(arguments, handler, kind, type_arguments, span, ctx) } Intrinsic::EncodingMemoryId => { type_check_encoding_memory_id(arguments, handler, kind, type_arguments, span, ctx) } Intrinsic::Alloc => { type_check_alloc(handler, ctx, kind, arguments, type_arguments, span) } } } } fn type_check_encoding_memory_id( arguments: &[Expression], handler: &Handler, kind: Intrinsic, type_arguments: &[GenericArgument], span: Span, ctx: TypeCheckContext, ) -> Result<(TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { if !arguments.is_empty() { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 0, span, })); } if type_arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumTArgs { name: kind.to_string(), expected: 1, span, })); } let targ = &type_arguments[0]; let arg = ctx .resolve_type( handler, targ.type_id(), &targ.span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| ctx.engines.te().id_of_error_recovery(err)); let mut final_type_arguments = type_arguments.to_vec(); *final_type_arguments[0].type_id_mut() = arg; let intrinsic_function = ty::TyIntrinsicFunctionKind { kind, arguments: vec![], type_arguments: final_type_arguments, span: span.clone(), }; Ok((intrinsic_function, ctx.engines.te().id_of_u64())) } fn type_check_runtime_memory_id( arguments: &[Expression], handler: &Handler, kind: Intrinsic, type_arguments: &[GenericArgument], span: Span, ctx: TypeCheckContext, ) -> Result<(TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { if !arguments.is_empty() { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 0, span, })); } if type_arguments.len() != 1 { return 
Err(handler.emit_err(CompileError::IntrinsicIncorrectNumTArgs { name: kind.to_string(), expected: 1, span, })); } let targ = &type_arguments[0]; let arg = ctx .resolve_type( handler, targ.type_id(), &targ.span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| ctx.engines.te().id_of_error_recovery(err)); let mut final_type_arguments = type_arguments.to_vec(); *final_type_arguments[0].type_id_mut() = arg; let intrinsic_function = ty::TyIntrinsicFunctionKind { kind, arguments: vec![], type_arguments: final_type_arguments, span: span.clone(), }; Ok((intrinsic_function, ctx.engines.te().id_of_u64())) } fn type_check_alloc( handler: &Handler, mut ctx: TypeCheckContext, kind: Intrinsic, arguments: &[Expression], type_arguments: &[GenericArgument], span: Span, ) -> Result<(TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { if arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 1, span, })); } let engines = ctx.engines(); // Type argument needs to be explicitly defined if type_arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumTArgs { name: kind.to_string(), expected: 1, span, })); } let alloc_type = ctx .resolve_type( handler, type_arguments[0].type_id(), &type_arguments[0].span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| engines.te().id_of_error_recovery(err)); // type check first argument, ensure that it is u64 let first_argument_typed_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(engines.te().id_of_u64()); ty::TyExpression::type_check(handler, ctx, &arguments[0])? 
}; let mut final_type_arguments = type_arguments.to_vec(); *final_type_arguments[0].type_id_mut() = alloc_type; Ok(( TyIntrinsicFunctionKind { kind, arguments: vec![first_argument_typed_expr], type_arguments: final_type_arguments, span, }, engines.te().id_of_raw_ptr(), )) } fn type_check_transmute( arguments: &[Expression], handler: &Handler, kind: Intrinsic, type_arguments: &[GenericArgument], span: Span, mut ctx: TypeCheckContext, ) -> Result<(TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { if arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 1, span, })); } let engines = ctx.engines(); // Both type arguments needs to be explicitly defined if type_arguments.len() != 2 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumTArgs { name: kind.to_string(), expected: 2, span, })); } let src_type = ctx .resolve_type( handler, type_arguments[0].type_id(), &type_arguments[0].span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| engines.te().id_of_error_recovery(err)); let return_type = ctx .resolve_type( handler, type_arguments[1].type_id(), &type_arguments[1].span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| engines.te().id_of_error_recovery(err)); // type check first argument let arg_type = engines.te().new_unknown(); let first_argument_typed_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(arg_type); ty::TyExpression::type_check(handler, ctx, &arguments[0])? 
}; engines.te().unify( handler, engines, first_argument_typed_expr.return_type, src_type, &first_argument_typed_expr.span, "", || None, ); let mut final_type_arguments = type_arguments.to_vec(); *final_type_arguments[0].type_id_mut() = src_type; *final_type_arguments[1].type_id_mut() = return_type; Ok(( TyIntrinsicFunctionKind { kind, arguments: vec![first_argument_typed_expr], type_arguments: final_type_arguments, span, }, return_type, )) } fn type_check_elem_at( arguments: &[Expression], handler: &Handler, kind: Intrinsic, span: Span, ctx: TypeCheckContext, ) -> Result<(TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { if arguments.len() != 2 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 2, span, })); } let type_engine = ctx.engines.te(); let engines = ctx.engines(); let mut ctx = ctx; // check first argument let first_argument_span = arguments[0].span.clone(); let first_argument_type = type_engine.new_unknown(); let first_argument_typed_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(first_argument_type); ty::TyExpression::type_check(handler, ctx, &arguments[0])? 
}; // first argument can be ref to array or ref to slice let elem_type = match &*type_engine.get(first_argument_type) { TypeInfo::Ref { referenced_type, to_mutable_value, } => match &*type_engine.get(referenced_type.type_id) { TypeInfo::Array(elem_ty, _) | TypeInfo::Slice(elem_ty) => { Some((*to_mutable_value, elem_ty.type_id)) } _ => None, }, _ => None, }; let Some((to_mutable_value, elem_type_type_id)) = elem_type else { return Err(handler.emit_err(CompileError::IntrinsicUnsupportedArgType { name: kind.to_string(), span: first_argument_span, hint: "Only references to arrays or slices can be used as argument here".to_string(), })); }; // index argument let index_typed_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(type_engine.id_of_u64()); ty::TyExpression::type_check(handler, ctx, &arguments[1])? }; let return_type = type_engine.insert_ref_without_annotations(engines, to_mutable_value, elem_type_type_id); Ok(( TyIntrinsicFunctionKind { kind, arguments: vec![first_argument_typed_expr, index_typed_expr], type_arguments: vec![], span, }, return_type, )) } fn type_check_slice( handler: &Handler, mut ctx: TypeCheckContext, kind: sway_ast::Intrinsic, arguments: &[Expression], _type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { if arguments.len() != 3 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 3, span, })); } let type_engine = ctx.engines.te(); let engines = ctx.engines(); // start index argument let start_ty_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(type_engine.id_of_u64()); ty::TyExpression::type_check(handler, ctx, &arguments[1])? }; // end index argument let end_ty_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(type_engine.id_of_u64()); ty::TyExpression::type_check(handler, ctx, &arguments[2])? 
}; // check first argument let first_argument_span = arguments[0].span.clone(); let first_argument_type = type_engine.new_unknown(); let first_argument_ty_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(first_argument_type); ty::TyExpression::type_check(handler, ctx, &arguments[0])? }; // statically check start and end, if possible let start_literal = start_ty_expr .expression .as_literal() .and_then(|x| x.cast_value_to_u64()); let end_literal = end_ty_expr .expression .as_literal() .and_then(|x| x.cast_value_to_u64()); if let (Some(start), Some(end)) = (start_literal, end_literal) { if start > end { return Err( handler.emit_err(CompileError::InvalidRangeEndGreaterThanStart { start, end, span, }), ); } } fn create_ref_to_slice( engines: &Engines, to_mutable_value: bool, elem_type_arg: GenericTypeArgument, ) -> TypeId { let type_engine = engines.te(); let slice_type_id = type_engine.insert_slice(engines, elem_type_arg); type_engine.insert_ref_without_annotations(engines, to_mutable_value, slice_type_id) } // first argument can be ref to array or ref to slice let err = CompileError::IntrinsicUnsupportedArgType { name: kind.to_string(), span: first_argument_span, hint: "Only references to arrays or slices can be used as argument here".to_string(), }; let r = match &*type_engine.get(first_argument_type) { TypeInfo::Ref { referenced_type, to_mutable_value, } => match &*type_engine.get(referenced_type.type_id) { TypeInfo::Array(elem_type_arg, array_len) if array_len.expr().as_literal_val().is_some() => { // SAFETY: safe by the guard above let array_len = array_len .expr() .as_literal_val() .expect("unexpected non literal array length") as u64; if let Some(v) = start_literal { if v > array_len { return Err(handler.emit_err(CompileError::ArrayOutOfBounds { index: v, count: array_len, span, })); } } if let Some(v) = end_literal { if v > array_len { return Err(handler.emit_err(CompileError::ArrayOutOfBounds { index: v, count: array_len, span, })); 
} } Some(( TyIntrinsicFunctionKind { kind, arguments: vec![first_argument_ty_expr, start_ty_expr, end_ty_expr], type_arguments: vec![], span, }, create_ref_to_slice(engines, *to_mutable_value, elem_type_arg.clone()), )) } TypeInfo::Slice(elem_type_arg) => Some(( TyIntrinsicFunctionKind { kind, arguments: vec![first_argument_ty_expr, start_ty_expr, end_ty_expr], type_arguments: vec![], span, }, create_ref_to_slice(engines, *to_mutable_value, elem_type_arg.clone()), )), _ => None, }, _ => None, }; match r { Some(r) => Ok(r), None => Err(handler.emit_err(err)), } } fn type_check_encode_as_raw_slice( handler: &Handler, mut ctx: TypeCheckContext, kind: sway_ast::Intrinsic, arguments: &[Expression], _type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { let type_engine = ctx.engines.te(); let buffer_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(type_engine.new_unknown()); ty::TyExpression::type_check(handler, ctx, &arguments[0].clone())? }; let kind = ty::TyIntrinsicFunctionKind { kind, arguments: vec![buffer_expr], type_arguments: vec![], span, }; Ok((kind, type_engine.id_of_raw_slice())) } fn type_check_encode_buffer_empty( handler: &Handler, ctx: TypeCheckContext, kind: sway_ast::Intrinsic, arguments: &[Expression], _type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { if !arguments.is_empty() { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 0, span, })); } let kind = ty::TyIntrinsicFunctionKind { kind, arguments: vec![], type_arguments: vec![], span, }; Ok((kind, get_encoding_buffer_type(ctx.engines()))) } /// Returns the [TypeId] of the buffer type used in encoding: `(raw_ptr, u64, u64)`. /// The buffer type is a shareable [TypeInfo::Tuple], so it will be inserted into /// the [TypeEngine] only once, when this method is called for the first time. 
fn get_encoding_buffer_type(engines: &Engines) -> TypeId { let type_engine = engines.te(); type_engine.insert_tuple_without_annotations( engines, vec![ type_engine.id_of_raw_ptr(), type_engine.id_of_u64(), type_engine.id_of_u64(), ], ) } fn type_check_encode_append( handler: &Handler, mut ctx: TypeCheckContext, kind: sway_ast::Intrinsic, arguments: &[Expression], _type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { if arguments.len() != 2 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 2, span, })); } let type_engine = ctx.engines.te(); let engines = ctx.engines(); let buffer_type = get_encoding_buffer_type(engines); let buffer_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(buffer_type); ty::TyExpression::type_check(handler, ctx, &arguments[0])? }; let item_span = arguments[1].span.clone(); let item_type = type_engine.new_unknown(); let item_expr = { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(item_type); ty::TyExpression::type_check(handler, ctx, &arguments[1])? }; // only supported types if item_type.is_concrete(engines, TreatNumericAs::Abstract) { match &*engines.te().get(item_type) { TypeInfo::Boolean | TypeInfo::UnsignedInteger(IntegerBits::Eight) | TypeInfo::UnsignedInteger(IntegerBits::Sixteen) | TypeInfo::UnsignedInteger(IntegerBits::ThirtyTwo) | TypeInfo::UnsignedInteger(IntegerBits::SixtyFour) | TypeInfo::UnsignedInteger(IntegerBits::V256) | TypeInfo::B256 | TypeInfo::StringArray(_) | TypeInfo::StringSlice | TypeInfo::RawUntypedSlice => {} _ => { return Err( handler.emit_err(CompileError::EncodingUnsupportedType { span: item_span }) ) } }; } let kind = ty::TyIntrinsicFunctionKind { kind, arguments: vec![buffer_expr, item_expr], type_arguments: vec![], span, }; Ok((kind, buffer_type)) } /// Signature: `__not(val: u64) -> u64` /// Description: Return the bitwise negation of the operator. 
/// Constraints: None. fn type_check_not( handler: &Handler, ctx: TypeCheckContext, kind: sway_ast::Intrinsic, arguments: &[Expression], _type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); if arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 1, span, })); } let return_type = type_engine.new_unknown(); let mut ctx = ctx.with_help_text("").with_type_annotation(return_type); let operand = &arguments[0]; let operand_expr = ty::TyExpression::type_check(handler, ctx.by_ref(), operand)?; let t_arc = engines.te().get(operand_expr.return_type); let t = &*t_arc; match t { TypeInfo::B256 | TypeInfo::UnsignedInteger(_) | TypeInfo::Numeric => Ok(( ty::TyIntrinsicFunctionKind { kind, arguments: vec![operand_expr], type_arguments: vec![], span, }, return_type, )), _ => Err(handler.emit_err(CompileError::TypeError( sway_error::type_error::TypeError::MismatchedType { expected: "unsigned integer or b256".into(), received: engines.help_out(return_type).to_string(), help_text: "Incorrect argument type".into(), span, }, ))), } } /// Signature: `__size_of_val<T>(val: T) -> u64` /// Description: Return the size of type `T` in bytes. /// Constraints: None. 
fn type_check_size_of_val( handler: &Handler, ctx: TypeCheckContext, kind: sway_ast::Intrinsic, arguments: &[Expression], _type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { let type_engine = ctx.engines.te(); if arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 1, span, })); } let ctx = ctx .with_help_text("") .with_type_annotation(type_engine.new_unknown()); let exp = ty::TyExpression::type_check(handler, ctx, &arguments[0])?; let intrinsic_function = ty::TyIntrinsicFunctionKind { kind, arguments: vec![exp], type_arguments: vec![], span: span.clone(), }; Ok((intrinsic_function, type_engine.id_of_u64())) } /// Signature: `__size_of<T>() -> u64` /// Description: Return the size of type `T` in bytes. /// Constraints: None. fn type_check_size_of_type( handler: &Handler, ctx: TypeCheckContext, kind: sway_ast::Intrinsic, arguments: &[Expression], type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); if !arguments.is_empty() { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(), expected: 0, span, })); } if type_arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumTArgs { name: kind.to_string(), expected: 1, span, })); } let targ = type_arguments[0].clone(); let initial_type_info = type_engine .to_typeinfo(targ.type_id(), &targ.span()) .map_err(|e| handler.emit_err(e.into())) .unwrap_or_else(TypeInfo::ErrorRecovery); let initial_type_id = type_engine.insert(engines, initial_type_info, targ.span().source_id()); let type_id = ctx .resolve_type( handler, initial_type_id, &targ.span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); let intrinsic_function = ty::TyIntrinsicFunctionKind { 
kind, arguments: vec![], type_arguments: vec![GenericArgument::Type(GenericTypeArgument { type_id, initial_type_id, span: targ.span(), call_path_tree: targ .as_type_argument() .unwrap() .call_path_tree .as_ref() .cloned(), })], span, }; Ok((intrinsic_function, type_engine.id_of_u64())) } /// Signature: `__is_reference_type<T>() -> bool` /// Description: Returns `true` if `T` is a _reference type_ and `false` otherwise. /// Constraints: None. fn type_check_is_reference_type( handler: &Handler, ctx: TypeCheckContext, kind: sway_ast::Intrinsic, _arguments: &[Expression], type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); if type_arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumTArgs { name: kind.to_string(), expected: 1, span, })); } let targ = type_arguments[0].clone(); let initial_type_info = type_engine .to_typeinfo(targ.type_id(), &targ.span()) .map_err(|e| handler.emit_err(e.into())) .unwrap_or_else(TypeInfo::ErrorRecovery); let initial_type_id = type_engine.insert(engines, initial_type_info, targ.span().source_id()); let type_id = ctx .resolve_type( handler, initial_type_id, &targ.span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); let intrinsic_function = ty::TyIntrinsicFunctionKind { kind, arguments: vec![], type_arguments: vec![GenericArgument::Type(GenericTypeArgument { type_id, initial_type_id, span: targ.span(), call_path_tree: targ .as_type_argument() .unwrap() .call_path_tree .as_ref() .cloned(), })], span, }; Ok((intrinsic_function, type_engine.id_of_bool())) } /// Signature: `__assert_is_str_array<T>()` /// Description: Throws a compile error if `T` is not of type str. /// Constraints: None. 
fn type_check_assert_is_str_array( handler: &Handler, ctx: TypeCheckContext, kind: sway_ast::Intrinsic, _arguments: &[Expression], type_arguments: &[GenericArgument], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); if type_arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumTArgs { name: kind.to_string(), expected: 1, span, })); } let targ = type_arguments[0].clone(); let initial_type_info = type_engine .to_typeinfo(targ.type_id(), &targ.span()) .map_err(|e| handler.emit_err(e.into())) .unwrap_or_else(TypeInfo::ErrorRecovery); let initial_type_id = type_engine.insert(engines, initial_type_info, targ.span().source_id()); let type_id = ctx .resolve_type( handler, initial_type_id, &targ.span(), EnforceTypeArguments::Yes, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); let intrinsic_function = ty::TyIntrinsicFunctionKind { kind, arguments: vec![], type_arguments: vec![GenericArgument::Type(GenericTypeArgument { type_id, initial_type_id, span: targ.span(), call_path_tree: targ .as_type_argument() .unwrap() .call_path_tree .as_ref() .cloned(), })], span, }; Ok((intrinsic_function, type_engine.id_of_unit())) } fn type_check_to_str_array( handler: &Handler, mut ctx: TypeCheckContext, kind: sway_ast::Intrinsic, arguments: &[Expression], span: Span, ) -> Result<(ty::TyIntrinsicFunctionKind, TypeId), ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); if arguments.len() != 1 { return Err(handler.emit_err(CompileError::IntrinsicIncorrectNumArgs { name: kind.to_string(),
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression.rs
mod constant_expression; mod enum_instantiation; mod function_application; mod if_expression; mod lazy_operator; mod method_application; mod struct_field_access; mod struct_instantiation; mod tuple_index_access; mod unsafe_downcast; use self::constant_expression::instantiate_constant_expression; pub(crate) use self::{ enum_instantiation::*, function_application::*, if_expression::*, lazy_operator::*, method_application::*, struct_field_access::*, struct_instantiation::*, tuple_index_access::*, unsafe_downcast::*, }; use crate::{ asm_lang::{virtual_ops::VirtualOp, virtual_register::VirtualRegister}, decl_engine::*, language::{ parsed::*, ty::{ self, GetDeclIdent, StructAccessInfo, TyCodeBlock, TyDecl, TyExpression, TyExpressionVariant, TyImplItem, TyReassignmentTarget, VariableMutability, }, *, }, namespace::{IsExtendingExistingImpl, IsImplInterfaceSurface, IsImplSelf, TraitMap}, semantic_analysis::{expression::ReachableReport, *}, transform::to_parsed_lang::type_name_to_type_info_opt, type_system::*, Engines, }; use ast_elements::{type_argument::GenericTypeArgument, type_parameter::ConstGenericExpr}; use ast_node::declaration::{insert_supertraits_into_namespace, SupertraitOf}; use either::Either; use indexmap::IndexMap; use namespace::{LexicalScope, Module, ResolvedDeclaration}; use rustc_hash::FxHashSet; use std::collections::{BTreeMap, HashMap, VecDeque}; use sway_ast::intrinsics::Intrinsic; use sway_error::{ convert_parse_tree_error::ConvertParseTreeError, error::{CompileError, StructFieldUsageContext}, handler::{ErrorEmitted, Handler}, warning::{CompileWarning, Warning}, }; use sway_types::{integer_bits::IntegerBits, u256::U256, BaseIdent, Ident, Named, Span, Spanned}; use symbol_collection_context::SymbolCollectionContext; use type_resolve::{resolve_call_path, VisibilityCheck}; #[allow(clippy::too_many_arguments)] impl ty::TyExpression { pub(crate) fn std_ops_eq( handler: &Handler, ctx: TypeCheckContext, arguments: Vec<ty::TyExpression>, span: Span, ) -> 
Result<ty::TyExpression, ErrorEmitted> { let type_engine = ctx.engines.te(); let ctx = ctx.with_type_annotation(type_engine.id_of_bool()); Self::std_ops(handler, ctx, OpVariant::Equals, arguments, span) } pub(crate) fn std_ops_neq( handler: &Handler, ctx: TypeCheckContext, arguments: Vec<ty::TyExpression>, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let type_engine = ctx.engines.te(); let ctx = ctx.with_type_annotation(type_engine.id_of_bool()); Self::std_ops(handler, ctx, OpVariant::NotEquals, arguments, span) } fn std_ops( handler: &Handler, mut ctx: TypeCheckContext, op_variant: OpVariant, arguments: Vec<ty::TyExpression>, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let decl_engine = ctx.engines.de(); let call_path = CallPath { prefixes: vec![ Ident::new_with_override("std".into(), span.clone()), Ident::new_with_override("ops".into(), span.clone()), ], suffix: Op { op_variant, span: span.clone(), } .to_method_name(), callpath_type: CallPathType::Full, }; let mut method_name_binding = TypeBinding { inner: MethodName::FromTrait { call_path: call_path.clone(), }, type_arguments: TypeArgs::Regular(vec![]), span: call_path.span(), }; let arguments = VecDeque::from(arguments); let arguments_types = arguments.iter().map(|a| a.return_type).collect::<Vec<_>>(); let (mut decl_ref, _) = resolve_method_name( handler, ctx.by_ref(), &method_name_binding, &arguments_types, )?; decl_ref = monomorphize_method( handler, ctx, decl_ref.clone(), method_name_binding.type_arguments.to_vec_mut(), BTreeMap::new(), )?; let method = decl_engine.get_function(&decl_ref); // check that the number of parameters and the number of the arguments is the same check_function_arguments_arity(handler, arguments.len(), &method, &call_path, false)?; let return_type = &method.return_type; let args_and_names = method .parameters .iter() .zip(arguments) .map(|(param, arg)| (param.name.clone(), arg)) .collect::<Vec<(_, _)>>(); let exp = ty::TyExpression { expression: 
ty::TyExpressionVariant::FunctionApplication { call_path, arguments: args_and_names, fn_ref: decl_ref, selector: None, type_binding: None, method_target: None, contract_call_params: IndexMap::new(), contract_caller: None, }, return_type: return_type.type_id, span, }; Ok(exp) } pub(crate) fn collect( handler: &Handler, engines: &Engines, ctx: &mut SymbolCollectionContext, expr: &Expression, ) -> Result<(), ErrorEmitted> { match &expr.kind { ExpressionKind::Error(_, _) => {} ExpressionKind::Literal(_) => {} ExpressionKind::AmbiguousPathExpression(expr) => { expr.args .iter() .map(|arg_expr| Self::collect(handler, engines, ctx, arg_expr)) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } ExpressionKind::FunctionApplication(expr) => { expr.arguments .iter() .map(|arg_expr| Self::collect(handler, engines, ctx, arg_expr)) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } ExpressionKind::LazyOperator(expr) => { Self::collect(handler, engines, ctx, &expr.lhs)?; Self::collect(handler, engines, ctx, &expr.rhs)?; } ExpressionKind::AmbiguousVariableExpression(_) => {} ExpressionKind::Variable(_) => {} ExpressionKind::Tuple(exprs) => { exprs .iter() .map(|expr| Self::collect(handler, engines, ctx, expr)) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } ExpressionKind::TupleIndex(expr) => { Self::collect(handler, engines, ctx, &expr.prefix)?; } ExpressionKind::Array(ArrayExpression::Explicit { contents, .. }) => { contents .iter() .map(|expr| Self::collect(handler, engines, ctx, expr)) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } ExpressionKind::Array(ArrayExpression::Repeat { value, length }) => { Self::collect(handler, engines, ctx, value)?; Self::collect(handler, engines, ctx, length)?; } ExpressionKind::Struct(expr) => { expr.fields .iter() .map(|field| Self::collect(handler, engines, ctx, &field.value)) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } ExpressionKind::CodeBlock(code_block) => { TyCodeBlock::collect(handler, engines, ctx, code_block)? 
} ExpressionKind::If(if_expr) => { Self::collect(handler, engines, ctx, &if_expr.condition)?; Self::collect(handler, engines, ctx, &if_expr.then)?; if let Some(r#else) = &if_expr.r#else { Self::collect(handler, engines, ctx, r#else)? } } ExpressionKind::Match(expr) => { Self::collect(handler, engines, ctx, &expr.value)?; expr.branches .iter() .map(|branch| { // create a new namespace for this branch result ctx.scoped(engines, branch.span.clone(), None, |scoped_ctx| { Self::collect(handler, engines, scoped_ctx, &branch.result) }) .0 }) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } ExpressionKind::Asm(_) => {} ExpressionKind::MethodApplication(expr) => { expr.arguments .iter() .map(|expr| Self::collect(handler, engines, ctx, expr)) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } ExpressionKind::Subfield(expr) => { Self::collect(handler, engines, ctx, &expr.prefix)?; } ExpressionKind::DelineatedPath(expr) => { if let Some(expr_args) = &expr.args { expr_args .iter() .map(|arg_expr| Self::collect(handler, engines, ctx, arg_expr)) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } } ExpressionKind::AbiCast(expr) => { Self::collect(handler, engines, ctx, &expr.address)?; } ExpressionKind::ArrayIndex(expr) => { Self::collect(handler, engines, ctx, &expr.prefix)?; Self::collect(handler, engines, ctx, &expr.index)?; } ExpressionKind::StorageAccess(_) => {} ExpressionKind::IntrinsicFunction(expr) => { expr.arguments .iter() .map(|arg_expr| Self::collect(handler, engines, ctx, arg_expr)) .collect::<Result<Vec<_>, ErrorEmitted>>()?; } ExpressionKind::WhileLoop(expr) => { Self::collect(handler, engines, ctx, &expr.condition)?; TyCodeBlock::collect(handler, engines, ctx, &expr.body)? 
} ExpressionKind::ForLoop(expr) => { Self::collect(handler, engines, ctx, &expr.desugared)?; } ExpressionKind::Break => {} ExpressionKind::Continue => {} ExpressionKind::Reassignment(expr) => { match &expr.lhs { ReassignmentTarget::ElementAccess(expr) => { Self::collect(handler, engines, ctx, expr)?; } ReassignmentTarget::Deref(expr) => { Self::collect(handler, engines, ctx, expr)?; } } Self::collect(handler, engines, ctx, &expr.rhs)?; } ExpressionKind::ImplicitReturn(expr) => Self::collect(handler, engines, ctx, expr)?, ExpressionKind::Return(expr) => { Self::collect(handler, engines, ctx, expr)?; } ExpressionKind::Panic(expr) => { Self::collect(handler, engines, ctx, expr)?; } ExpressionKind::Ref(expr) => { Self::collect(handler, engines, ctx, &expr.value)?; } ExpressionKind::Deref(expr) => { Self::collect(handler, engines, ctx, expr)?; } } Ok(()) } pub(crate) fn type_check( handler: &Handler, mut ctx: TypeCheckContext, expr: &Expression, ) -> Result<Self, ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); let expr_span = expr.span(); let span = expr_span.clone(); let res = match &expr.kind { // We've already emitted an error for the `::Error` case. ExpressionKind::Error(_, err) => Ok(ty::TyExpression::error(*err, span, engines)), ExpressionKind::Literal(lit) => { Ok(Self::type_check_literal(engines, lit.clone(), span)) } ExpressionKind::AmbiguousVariableExpression(name) => { if matches!( ctx.resolve_symbol(&Handler::default(), name).ok(), Some(ty::TyDecl::EnumVariantDecl { .. 
}) ) { let call_path = CallPath { prefixes: vec![], suffix: name.clone(), callpath_type: CallPathType::Ambiguous, }; Self::type_check_delineated_path( handler, ctx.by_ref(), TypeBinding { span: call_path.span(), inner: QualifiedCallPath { call_path, qualified_path_root: None, }, type_arguments: TypeArgs::Regular(vec![]), }, span, None, ) } else { Self::type_check_variable_expression(handler, ctx.by_ref(), name.clone(), span) } } ExpressionKind::Variable(name) => { Self::type_check_variable_expression(handler, ctx.by_ref(), name.clone(), span) } ExpressionKind::FunctionApplication(function_application_expression) => { let FunctionApplicationExpression { call_path_binding, resolved_call_path_binding: _, ref arguments, } = *function_application_expression.clone(); Self::type_check_function_application( handler, ctx.by_ref(), call_path_binding, arguments, span, ) } ExpressionKind::LazyOperator(LazyOperatorExpression { op, lhs, rhs }) => { let ctx = ctx.by_ref().with_type_annotation(type_engine.id_of_bool()); Self::type_check_lazy_operator(handler, ctx, op.clone(), lhs, rhs, span) } ExpressionKind::CodeBlock(contents) => { Self::type_check_code_block(handler, ctx.by_ref(), contents, span) } // TODO: If _condition_ is constant, evaluate it and compile this to an // expression with only one branch. Think at which stage to do it because // the same optimization should be done on desugared match expressions. 
ExpressionKind::If(IfExpression { condition, then, r#else, }) => Self::type_check_if_expression( handler, ctx.by_ref().with_help_text(""), *condition.clone(), *then.clone(), r#else.as_ref().map(|e| *e.clone()), span, ), ExpressionKind::Match(MatchExpression { value, branches }) => { Self::type_check_match_expression( handler, ctx.by_ref().with_help_text(""), value, branches.clone(), span, ) } ExpressionKind::Asm(asm) => { Self::type_check_asm_expression(handler, ctx.by_ref(), *asm.clone(), span) } ExpressionKind::Struct(struct_expression) => struct_instantiation( handler, ctx.by_ref(), struct_expression.call_path_binding.clone(), &struct_expression.fields, span, ), ExpressionKind::Subfield(SubfieldExpression { prefix, field_to_access, }) => Self::type_check_subfield_expression( handler, ctx.by_ref(), prefix, span, field_to_access.clone(), ), ExpressionKind::MethodApplication(method_application_expression) => { let MethodApplicationExpression { method_name_binding, contract_call_params, ref arguments, } = *method_application_expression.clone(); type_check_method_application( handler, ctx.by_ref(), method_name_binding, contract_call_params, arguments, span, ) } ExpressionKind::Tuple(ref fields) => { Self::type_check_tuple(handler, ctx.by_ref(), fields, span) } ExpressionKind::TupleIndex(TupleIndexExpression { prefix, index, index_span, }) => Self::type_check_tuple_index( handler, ctx.by_ref(), *prefix.clone(), *index, index_span.clone(), span, ), ExpressionKind::AmbiguousPathExpression(e) => { let AmbiguousPathExpression { call_path_binding, ref args, qualified_path_root, } = *e.clone(); Self::type_check_ambiguous_path( handler, ctx.by_ref(), call_path_binding, span, args, qualified_path_root, ) } ExpressionKind::DelineatedPath(delineated_path_expression) => { let DelineatedPathExpression { call_path_binding, args, } = *delineated_path_expression.clone(); Self::type_check_delineated_path( handler, ctx.by_ref(), call_path_binding, span, args.as_deref(), ) } 
ExpressionKind::AbiCast(abi_cast_expression) => { let AbiCastExpression { abi_name, address } = &**abi_cast_expression; Self::type_check_abi_cast(handler, ctx.by_ref(), abi_name.clone(), address, span) } ExpressionKind::Array(ArrayExpression::Explicit { contents, .. }) => { Self::type_check_array_explicit(handler, ctx.by_ref(), contents, span) } ExpressionKind::Array(ArrayExpression::Repeat { value, length }) => { Self::type_check_array_repeat(handler, ctx.by_ref(), value, length, span) } ExpressionKind::ArrayIndex(ArrayIndexExpression { prefix, index }) => { let ctx = ctx .by_ref() .with_type_annotation(type_engine.new_unknown()) .with_help_text(""); Self::type_check_array_index(handler, ctx, prefix, index, span) } ExpressionKind::StorageAccess(StorageAccessExpression { namespace_names, field_names, storage_keyword_span, }) => { let ctx = ctx .by_ref() .with_type_annotation(type_engine.new_unknown()) .with_help_text(""); Self::type_check_storage_access( handler, ctx, namespace_names, field_names, storage_keyword_span.clone(), &span, ) } ExpressionKind::IntrinsicFunction(IntrinsicFunctionExpression { kind_binding, ref arguments, .. 
}) => Self::type_check_intrinsic_function( handler, ctx.by_ref(), kind_binding.clone(), arguments, span, ), ExpressionKind::WhileLoop(WhileLoopExpression { condition, body, is_desugared_for_loop, }) => Self::type_check_while_loop( handler, ctx.by_ref(), condition, body, *is_desugared_for_loop, span, ), ExpressionKind::ForLoop(ForLoopExpression { desugared }) => { Self::type_check_for_loop(handler, ctx.by_ref(), desugared) } ExpressionKind::Break => { let expr = ty::TyExpression { expression: ty::TyExpressionVariant::Break, return_type: type_engine.id_of_never(), span, }; Ok(expr) } ExpressionKind::Continue => { let expr = ty::TyExpression { expression: ty::TyExpressionVariant::Continue, return_type: type_engine.id_of_never(), span, }; Ok(expr) } ExpressionKind::Reassignment(ReassignmentExpression { lhs, rhs }) => { Self::type_check_reassignment(handler, ctx.by_ref(), lhs.clone(), rhs, span) } ExpressionKind::ImplicitReturn(expr) => { let ctx = ctx .by_ref() .with_help_text("Implicit return must match up with block's type."); let expr_span = expr.span(); let expr = ty::TyExpression::type_check(handler, ctx, expr) .unwrap_or_else(|err| ty::TyExpression::error(err, expr_span, engines)); let typed_expr = ty::TyExpression { return_type: expr.return_type, expression: ty::TyExpressionVariant::ImplicitReturn(Box::new(expr)), span, }; Ok(typed_expr) } ExpressionKind::Return(expr) => { let function_type_annotation = ctx.function_type_annotation(); let ctx = ctx .by_ref() .with_type_annotation(function_type_annotation) .with_help_text( "Return expression must return the declared function return type.", ); let expr_span = expr.span(); let expr = ty::TyExpression::type_check(handler, ctx, expr) .unwrap_or_else(|err| ty::TyExpression::error(err, expr_span, engines)); let typed_expr = ty::TyExpression { expression: ty::TyExpressionVariant::Return(Box::new(expr)), return_type: type_engine.id_of_never(), span, }; Ok(typed_expr) } ExpressionKind::Panic(expr) => { 
type_check_panic(handler, ctx.by_ref(), engines, expr, span) } ExpressionKind::Ref(RefExpression { to_mutable_value, value, }) => Self::type_check_ref(handler, ctx.by_ref(), *to_mutable_value, value, span), ExpressionKind::Deref(expr) => { Self::type_check_deref(handler, ctx.by_ref(), expr, span) } }; let mut typed_expression = res?; // if the return type cannot be cast into the annotation type then it is a type error ctx.unify_with_type_annotation(handler, typed_expression.return_type, &expr_span); // The annotation may result in a cast, which is handled in the type engine. typed_expression.return_type = ctx .resolve_type( handler, typed_expression.return_type, &expr_span, EnforceTypeArguments::No, None, ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); // Literals of type Numeric can now be resolved if typed_expression.return_type is // an UnsignedInteger or a Numeric if let ty::TyExpressionVariant::Literal(lit) = typed_expression.clone().expression { if let Literal::Numeric(_) = lit { match &*type_engine.get(typed_expression.return_type) { TypeInfo::UnsignedInteger(_) | TypeInfo::Numeric => { typed_expression = Self::resolve_numeric_literal( handler, ctx, lit, expr_span, typed_expression.return_type, )? 
} _ => {} } } } Ok(typed_expression) } fn type_check_literal(engines: &Engines, lit: Literal, span: Span) -> ty::TyExpression { let type_engine = engines.te(); let return_type = match &lit { Literal::String(_) => type_engine.id_of_string_slice(), Literal::Numeric(_) => type_engine.new_numeric(), Literal::U8(_) => type_engine.id_of_u8(), Literal::U16(_) => type_engine.id_of_u16(), Literal::U32(_) => type_engine.id_of_u32(), Literal::U64(_) => type_engine.id_of_u64(), Literal::U256(_) => type_engine.id_of_u256(), Literal::Boolean(_) => type_engine.id_of_bool(), Literal::B256(_) => type_engine.id_of_b256(), Literal::Binary(_) => type_engine.id_of_raw_slice(), }; ty::TyExpression { expression: ty::TyExpressionVariant::Literal(lit), return_type, span, } } pub(crate) fn type_check_variable_expression( handler: &Handler, ctx: TypeCheckContext, name: Ident, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let decl_engine = ctx.engines.de(); let engines = ctx.engines(); let exp = match ctx.resolve_symbol(&Handler::default(), &name).ok() { Some(ty::TyDecl::VariableDecl(decl)) => { let ty::TyVariableDecl { name: decl_name, mutability, return_type, .. } = *decl; ty::TyExpression { return_type, expression: ty::TyExpressionVariant::VariableExpression { name: decl_name.clone(), span: name.span(), mutability, call_path: Some( CallPath::from(decl_name.clone()) .to_fullpath(ctx.engines(), ctx.namespace()), ), }, span, } } Some(ty::TyDecl::ConstantDecl(ty::ConstantDecl { decl_id, .. })) => { let const_decl = (*decl_engine.get_constant(&decl_id)).clone(); let decl_name = const_decl.name().clone(); ty::TyExpression { return_type: const_decl.return_type, expression: ty::TyExpressionVariant::ConstantExpression { decl: Box::new(const_decl), span: name.span(), call_path: Some( CallPath::from(decl_name).to_fullpath(ctx.engines(), ctx.namespace()), ), }, span, } } Some(ty::TyDecl::ConfigurableDecl(ty::ConfigurableDecl { decl_id, .. 
})) => { let decl = (*decl_engine.get_configurable(&decl_id)).clone(); let decl_name = decl.name().clone(); ty::TyExpression { return_type: decl.return_type, expression: ty::TyExpressionVariant::ConfigurableExpression { decl: Box::new(decl), span: name.span(), call_path: Some( CallPath::from(decl_name).to_fullpath(ctx.engines(), ctx.namespace()), ), }, span, } } Some(ty::TyDecl::ConstGenericDecl(ty::ConstGenericDecl { decl_id })) => { let decl = (*decl_engine.get(&decl_id)).clone(); ty::TyExpression { return_type: decl.return_type, expression: ty::TyExpressionVariant::ConstGenericExpression { decl: Box::new(decl), span: name.span(), call_path: CallPath { prefixes: vec![], suffix: name.clone(), callpath_type: CallPathType::Ambiguous, }, }, span, } } Some(ty::TyDecl::AbiDecl(ty::AbiDecl { decl_id, .. })) => { let decl = decl_engine.get_abi(&decl_id); ty::TyExpression { return_type: decl.create_type_id(engines), expression: ty::TyExpressionVariant::AbiName(AbiName::Known( decl.name.clone().into(), )), span, } } Some(a) => { let err = handler.emit_err(CompileError::NotAVariable { name: name.clone(), what_it_is: a.friendly_type_name_with_acronym(), span, }); ty::TyExpression::error(err, name.span(), engines) } None => { let err = handler.emit_err(CompileError::UnknownVariable { var_name: name.clone(), span, }); ty::TyExpression::error(err, name.span(), engines) } }; Ok(exp) } fn type_check_function_application( handler: &Handler, mut ctx: TypeCheckContext, mut call_path_binding: TypeBinding<CallPath>, arguments: &[Expression], span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { // Grab the fn declaration. 
let (fn_ref, _, _): (DeclRefFunction, _, _) = TypeBinding::type_check(&mut call_path_binding, handler, ctx.by_ref())?; instantiate_function_application( handler, ctx, fn_ref, call_path_binding, Some(arguments), span, ) } fn type_check_lazy_operator( handler: &Handler, ctx: TypeCheckContext, op: LazyOp, lhs: &Expression, rhs: &Expression, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let mut ctx = ctx.with_help_text(""); let engines = ctx.engines(); let typed_lhs = ty::TyExpression::type_check(handler, ctx.by_ref(), lhs) .unwrap_or_else(|err| ty::TyExpression::error(err, lhs.span().clone(), engines)); let typed_rhs = ty::TyExpression::type_check(handler, ctx.by_ref(), rhs) .unwrap_or_else(|err| ty::TyExpression::error(err, rhs.span().clone(), engines)); let type_annotation = ctx.type_annotation(); let exp = instantiate_lazy_operator(op, typed_lhs, typed_rhs, type_annotation, span); Ok(exp) } fn type_check_code_block( handler: &Handler, mut ctx: TypeCheckContext, contents: &CodeBlock, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let type_engine = ctx.engines.te(); let (typed_block, block_return_type) = match ty::TyCodeBlock::type_check(handler, ctx.by_ref(), contents, false) { Ok(res) => {
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/mod.rs
sway-core/src/semantic_analysis/ast_node/expression/mod.rs
mod intrinsic_function; mod match_expression; pub mod typed_expression; pub(crate) use match_expression::*;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/tuple_index_access.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/tuple_index_access.rs
use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::Span; use crate::{language::ty, CompileError, Engines, TypeInfo}; pub(crate) fn instantiate_tuple_index_access( handler: &Handler, engines: &Engines, parent: ty::TyExpression, index: usize, index_span: Span, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let type_engine = engines.te(); let mut current_prefix_te = Box::new(parent); let mut current_type = type_engine.get_unaliased(current_prefix_te.return_type); let prefix_type_id = current_prefix_te.return_type; let prefix_span = current_prefix_te.span.clone(); // Create the prefix part of the final tuple element access expression. // This might be an expression that directly evaluates to a tuple type, // or an arbitrary number of dereferencing expressions where the last one // dereference to a tuple type. // // We will either hit a tuple at the end or return an error, so the // loop cannot be endless. while !current_type.is_tuple() { match &*current_type { TypeInfo::Ref { referenced_type, .. 
} => { let referenced_type_id = referenced_type.type_id; current_prefix_te = Box::new(ty::TyExpression { expression: ty::TyExpressionVariant::Deref(current_prefix_te), return_type: referenced_type_id, span: prefix_span.clone(), }); current_type = type_engine.get_unaliased(referenced_type_id); } TypeInfo::ErrorRecovery(err) => return Err(*err), _ => { return Err( handler.emit_err(CompileError::TupleElementAccessOnNonTuple { actually: engines.help_out(prefix_type_id).to_string(), span: prefix_span, index, index_span, }), ) } }; } let TypeInfo::Tuple(type_args) = &*current_type else { panic!("The current type must be a tuple."); }; if type_args.len() <= index { return Err(handler.emit_err(CompileError::TupleIndexOutOfBounds { index, count: type_args.len(), tuple_type: engines.help_out(prefix_type_id).to_string(), span: index_span, prefix_span, })); } Ok(ty::TyExpression { expression: ty::TyExpressionVariant::TupleElemAccess { resolved_type_of_parent: current_prefix_te.return_type, prefix: current_prefix_te, elem_to_access_num: index, elem_to_access_span: index_span, }, return_type: type_args[index].type_id, span, }) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/lazy_operator.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/lazy_operator.rs
use sway_types::Span; use crate::{ language::{ty, LazyOp}, type_system::TypeId, }; pub(crate) fn instantiate_lazy_operator( op: LazyOp, lhs: ty::TyExpression, rhs: ty::TyExpression, return_type: TypeId, span: Span, ) -> ty::TyExpression { ty::TyExpression { expression: ty::TyExpressionVariant::LazyOperator { op, lhs: Box::new(lhs), rhs: Box::new(rhs), }, return_type, span, } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/method_application.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/method_application.rs
use crate::{ decl_engine::{ engine::{DeclEngineGet, DeclEngineGetParsedDeclId, DeclEngineReplace}, DeclEngineInsert, DeclRefFunction, ReplaceDecls, UpdateConstantExpression, }, language::{ parsed::*, ty::{self, TyDecl, TyExpression, TyFunctionSig}, *, }, semantic_analysis::*, type_system::*, }; use ast_elements::{ type_argument::GenericTypeArgument, type_parameter::{ConstGenericExpr, GenericTypeParameter}, }; use ast_node::typed_expression::check_function_arguments_arity; use indexmap::IndexMap; use itertools::izip; use std::collections::{BTreeMap, HashMap, VecDeque}; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::{constants, BaseIdent, IdentUnique, Named}; use sway_types::{constants::CONTRACT_CALL_COINS_PARAMETER_NAME, Spanned}; use sway_types::{Ident, Span}; #[allow(clippy::too_many_arguments)] pub(crate) fn type_check_method_application( handler: &Handler, mut ctx: TypeCheckContext, mut method_name_binding: TypeBinding<MethodName>, contract_call_params: Vec<StructExpressionField>, arguments: &[Expression], span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let type_engine = ctx.engines.te(); let decl_engine = ctx.engines.de(); let engines = ctx.engines(); let coercion_check = UnifyCheck::coercion(engines); // type check the function arguments (1st pass) // Some arguments may fail on this first pass because they may require the type_annotation to the parameter type. // If they fail the args_opt_buf will contain a None value. let mut args_opt_buf = VecDeque::new(); for (index, arg) in arguments.iter().enumerate() { let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(type_engine.new_unknown()); // Ignore errors in method parameters // On the second pass we will throw the errors if they persist. 
let arg_handler = Handler::default(); let arg_opt = ty::TyExpression::type_check(&arg_handler, ctx, arg).ok(); let has_errors = arg_handler.has_errors(); let has_numerics = arg_opt .as_ref() .map(|x| { x.return_type .extract_inner_types(engines, IncludeSelf::Yes) .iter() .any(|x| matches!(&*engines.te().get(*x), TypeInfo::Numeric)) }) .unwrap_or_default(); let needs_second_pass = has_errors || has_numerics; if index == 0 { // We want to emit errors in the self parameter and ignore TraitConstraintNotSatisfied with Placeholder // which may be recoverable on the second pass. arg_handler.retain_err(|e| { if let CompileError::TraitConstraintNotSatisfied { type_id, .. } = e { !matches!( *type_engine.get(TypeId::from(*type_id)), TypeInfo::Placeholder(_) ) } else { true } }); handler.append(arg_handler.clone()); } args_opt_buf.push_back((arg_opt, arg_handler, needs_second_pass)); } // resolve the method name to a typed function declaration and type_check let arguments_types = args_opt_buf .iter() .map(|(arg, _, _has_errors)| match arg { Some(arg) => arg.return_type, None => type_engine.new_unknown(), }) .collect::<Vec<_>>(); let method_result = resolve_method_name( handler, ctx.by_ref(), &method_name_binding, &arguments_types, ); // In case resolve_method_name fails throw argument errors. 
let (original_decl_ref, method_target) = if let Err(e) = method_result { for (_, arg_handler, _) in args_opt_buf.iter() { handler.append(arg_handler.clone()); } return Err(e); } else { method_result.unwrap() }; let original_decl = engines.de().get(original_decl_ref.id()); let const_generics = prepare_const_generics_materialization( engines, args_opt_buf .iter() .map(|x| x.0.as_ref().unwrap().return_type), original_decl .parameters .iter() .map(|x| x.type_argument.type_id), original_decl.type_parameters.iter(), ); let mut fn_ref = monomorphize_method( handler, ctx.by_ref(), original_decl_ref.clone(), method_name_binding.type_arguments.to_vec_mut(), const_generics, )?; let mut method = (*decl_engine.get_function(&fn_ref)).clone(); // unify method return type with current ctx.type_annotation(). type_engine.unify_with_generic( handler, engines, method.return_type.type_id, ctx.type_annotation(), &method_name_binding.span(), "Function return type does not match up with local type annotation.", || None, ); // type check the function arguments (2nd pass) let mut args_buf = VecDeque::new(); for (arg, index, arg_opt) in izip!(arguments.iter(), 0.., args_opt_buf.iter().cloned()) { let param_index = if method.is_contract_call { if index == 0 { if let (Some(arg), _, _) = arg_opt { args_buf.push_back(arg); } continue; } index - 1 //contract call methods don't have self parameter. } else { index }; if let (Some(arg), _, false) = arg_opt { if let Some(param) = method.parameters.get(param_index) { if coercion_check.check(arg.return_type, param.type_argument.type_id) { // If argument type coerces to resolved method parameter type skip second type_check. args_buf.push_back(arg); continue; } } else { args_buf.push_back(arg); continue; } } // We type check the argument expression again this time throwing out the error. let ctx = if let Some(param) = method.parameters.get(param_index) { // We now try to type check it again, this time with the type annotation. 
ctx.by_ref() .with_help_text( "Function application argument type must match function parameter type.", ) .with_type_annotation(param.type_argument.type_id) } else { ctx.by_ref() .with_help_text("") .with_type_annotation(type_engine.new_unknown()) }; args_buf.push_back( ty::TyExpression::type_check(handler, ctx, arg) .unwrap_or_else(|err| ty::TyExpression::error(err, span.clone(), engines)), ); } // check the method visibility if span.source_id() != method.span.source_id() && method.visibility.is_private() { return Err(handler.emit_err(CompileError::CallingPrivateLibraryMethod { name: method.name.as_str().to_string(), span, })); } if !method.is_contract_call && !contract_call_params.is_empty() { handler.emit_err(CompileError::CallParamForNonContractCallMethod { span: contract_call_params[0].name.span(), }); } // generate the map of the contract call params let mut untyped_contract_call_params_map = std::collections::HashMap::new(); let mut contract_call_params_map = IndexMap::new(); if method.is_contract_call { for param_name in &[ constants::CONTRACT_CALL_GAS_PARAMETER_NAME, constants::CONTRACT_CALL_COINS_PARAMETER_NAME, constants::CONTRACT_CALL_ASSET_ID_PARAMETER_NAME, ] { if contract_call_params .iter() .filter(|&param| param.name.span().as_str() == *param_name) .count() > 1 { handler.emit_err(CompileError::ContractCallParamRepeated { param_name: param_name.to_string(), span: span.clone(), }); } } for param in contract_call_params { match param.name.span().as_str() { constants::CONTRACT_CALL_GAS_PARAMETER_NAME | constants::CONTRACT_CALL_COINS_PARAMETER_NAME | constants::CONTRACT_CALL_ASSET_ID_PARAMETER_NAME => { untyped_contract_call_params_map .insert(param.name.to_string(), param.value.clone()); let type_annotation = if param.name.span().as_str() != constants::CONTRACT_CALL_ASSET_ID_PARAMETER_NAME { type_engine.id_of_u64() } else { type_engine.id_of_b256() }; let ctx = ctx .by_ref() .with_help_text("") .with_type_annotation(type_annotation); 
contract_call_params_map.insert( param.name.to_string(), ty::TyExpression::type_check(handler, ctx, &param.value).unwrap_or_else( |err| ty::TyExpression::error(err, span.clone(), engines), ), ); } _ => { handler.emit_err(CompileError::UnrecognizedContractParam { param_name: param.name.to_string(), span: param.name.span().clone(), }); } }; } // check if method is non-payable but we do not know _statically_ // the amount of coins sent in the contract call is zero // if the coins contract call parameter is not specified // it's considered to be zero and hence no error needs to be reported if let Some(coins_expr) = contract_call_params_map.get(CONTRACT_CALL_COINS_PARAMETER_NAME) { if coins_analysis::possibly_nonzero_u64_expression(&ctx, coins_expr) && !method .attributes .has_any_of_kind(crate::transform::AttributeKind::Payable) { return Err( handler.emit_err(CompileError::CoinsPassedToNonPayableMethod { fn_name: method.name.clone(), span, }), ); } } } // If this function is being called with method call syntax, a.b(c), // then make sure the first parameter is self, else issue an error. let mut is_method_call_syntax_used = false; if !method.is_contract_call { if let MethodName::FromModule { ref method_name } = method_name_binding.inner { if let Some(first_arg) = args_buf.front() { // check if the user calls an ABI supertrait's method (those are private) // as a contract method if let TypeInfo::ContractCaller { .. } = &*type_engine.get(first_arg.return_type) { return Err(handler.emit_err( CompileError::AbiSupertraitMethodCallAsContractCall { fn_name: method_name.clone(), span, }, )); } } is_method_call_syntax_used = true; let is_first_param_self = method .parameters .first() .map(|f| f.is_self()) .unwrap_or_default(); if !is_first_param_self { return Err( handler.emit_err(CompileError::AssociatedFunctionCalledAsMethod { fn_name: method_name.clone(), span, }), ); } } } // Validate mutability of self. 
Check that the variable that the method is called on is mutable // _if_ the method requires mutable self. fn mutability_check( handler: &Handler, ctx: &TypeCheckContext, method_name_binding: &TypeBinding<MethodName>, span: &Span, exp: &ty::TyExpressionVariant, ) -> Result<(), ErrorEmitted> { match exp { ty::TyExpressionVariant::VariableExpression { name, .. } => { let unknown_decl = ctx.resolve_symbol(&Handler::default(), name)?; let is_decl_mutable = match unknown_decl { ty::TyDecl::ConstantDecl { .. } => false, _ => { let variable_decl = unknown_decl .expect_variable(handler, ctx.engines()) .cloned()?; variable_decl.mutability.is_mutable() } }; if !is_decl_mutable { return Err(handler.emit_err(CompileError::MethodRequiresMutableSelf { method_name: method_name_binding.inner.easy_name(), variable_name: name.clone(), span: span.clone(), })); } Ok(()) } ty::TyExpressionVariant::StructFieldAccess { prefix, .. } => { mutability_check(handler, ctx, method_name_binding, span, &prefix.expression) } _ => Ok(()), } } if let ( Some(ty::TyExpression { expression: exp, .. }), Some(ty::TyFunctionParameter { is_mutable, .. }), ) = (args_buf.front(), method.parameters.first()) { if *is_mutable { mutability_check(handler, &ctx, &method_name_binding, &span, exp)?; } } // retrieve the function call path let call_path = match method_name_binding.inner.clone() { MethodName::FromType { call_path_binding, method_name, } => { let mut prefixes = call_path_binding.inner.prefixes; prefixes.push(match &call_path_binding.inner.suffix { ( TypeInfo::Custom { qualified_call_path: call_path, .. 
}, .., ) => call_path.call_path.clone().suffix, (_, ident) => ident.clone(), }); CallPath { prefixes, suffix: method_name, callpath_type: call_path_binding.inner.callpath_type, } } MethodName::FromModule { method_name } => CallPath { prefixes: vec![], suffix: method_name, callpath_type: CallPathType::Ambiguous, }, MethodName::FromTrait { call_path } => call_path, MethodName::FromQualifiedPathRoot { method_name, .. } => CallPath { prefixes: vec![], suffix: method_name, callpath_type: CallPathType::Ambiguous, }, }; // build the function selector let selector = if method.is_contract_call { let contract_caller = args_buf.pop_front(); let contract_address = match contract_caller .clone() .map(|x| (*type_engine.get(x.return_type)).clone()) { Some(TypeInfo::ContractCaller { address, .. }) => match address { Some(address) => address, None => { return Err(handler.emit_err(CompileError::ContractAddressMustBeKnown { span: call_path.span(), })); } }, None => { return Err(handler.emit_err(CompileError::ContractCallsItsOwnMethod { span })); } _ => { return Err(handler.emit_err(CompileError::Internal( "Attempted to find contract address of non-contract-call.", span, ))); } }; let func_selector = if ctx.experimental.new_encoding { None } else { Some( method .to_fn_selector_value(handler, engines) .unwrap_or([0; 4]), ) }; Some(ty::ContractCallParams { func_selector, contract_address: contract_address.clone(), contract_caller: Box::new(contract_caller.unwrap()), }) } else { None }; // check that the number of parameters and the number of the arguments is the same check_function_arguments_arity( handler, args_buf.len(), &method, &call_path, is_method_call_syntax_used, )?; let old_arguments = arguments; let arguments = method .parameters .iter() .map(|m| m.name.clone()) .zip(args_buf) .collect::<Vec<_>>(); // unify the types of the arguments with the types of the parameters from the function declaration let arguments = unify_arguments_and_parameters(handler, ctx.by_ref(), &arguments, 
&method.parameters)?; if ctx.experimental.new_encoding && method.is_contract_call { fn call_contract_call( ctx: &mut TypeCheckContext, original_span: Span, return_type: TypeId, method_name_expr: Expression, _caller: Expression, arguments: Vec<Expression>, typed_arguments: Vec<TypeId>, coins_expr: Expression, asset_id_expr: Expression, gas_expr: Expression, ) -> Expression { let tuple_args_type_id = ctx .engines .te() .insert_tuple_without_annotations(ctx.engines, typed_arguments); Expression { kind: ExpressionKind::FunctionApplication(Box::new( FunctionApplicationExpression { call_path_binding: TypeBinding { inner: CallPath { prefixes: vec![], suffix: Ident::new_no_span("contract_call".into()), callpath_type: CallPathType::Ambiguous, }, type_arguments: TypeArgs::Regular(vec![ GenericArgument::Type(GenericTypeArgument { type_id: return_type, initial_type_id: return_type, span: Span::dummy(), call_path_tree: None, }), GenericArgument::Type(GenericTypeArgument { type_id: tuple_args_type_id, initial_type_id: tuple_args_type_id, span: Span::dummy(), call_path_tree: None, }), ]), span: Span::dummy(), }, resolved_call_path_binding: None, arguments: vec![ Expression { kind: ExpressionKind::Literal(Literal::B256([0u8; 32])), span: Span::dummy(), }, method_name_expr, as_tuple(arguments), coins_expr, asset_id_expr, gas_expr, ], }, )), span: original_span, } } fn method_name_literal(method_name: &BaseIdent) -> Expression { let method_name_str = method_name.as_str(); let len_bytes = (method_name_str.len() as u64).to_be_bytes(); let mut blob = Vec::with_capacity(len_bytes.len() + method_name_str.len()); blob.extend(len_bytes); blob.extend(method_name_str.as_bytes()); Expression { kind: ExpressionKind::Literal(Literal::Binary(blob)), span: method_name.span(), } } fn as_tuple(elements: Vec<Expression>) -> Expression { Expression { kind: ExpressionKind::Tuple(elements), span: Span::dummy(), } } let gas_expr = untyped_contract_call_params_map 
.remove(constants::CONTRACT_CALL_GAS_PARAMETER_NAME) .unwrap_or_else(|| Expression { kind: ExpressionKind::Literal(Literal::U64(u64::MAX)), span: Span::dummy(), }); let coins_expr = untyped_contract_call_params_map .remove(constants::CONTRACT_CALL_COINS_PARAMETER_NAME) .unwrap_or_else(|| Expression { kind: ExpressionKind::Literal(Literal::U64(0)), span: Span::dummy(), }); let asset_id_expr = untyped_contract_call_params_map .remove(constants::CONTRACT_CALL_ASSET_ID_PARAMETER_NAME) .unwrap_or_else(|| Expression { kind: ExpressionKind::Literal(Literal::B256([0u8; 32])), span: Span::dummy(), }); // We need all impls of return type to be in scope, so that at call place we have access to its // AbiDecode impl. for type_id in method .return_type .type_id .extract_inner_types(engines, IncludeSelf::Yes) { ctx.impls_import(engines, type_id); } let args = old_arguments.iter().skip(1).cloned().collect(); let contract_call = call_contract_call( &mut ctx, span, method.return_type.type_id, method_name_literal(&method.name), old_arguments.first().cloned().unwrap(), args, arguments.iter().map(|x| x.1.return_type).collect(), coins_expr, asset_id_expr, gas_expr, ); let mut expr = TyExpression::type_check(handler, ctx.by_ref(), &contract_call)?; // We need to "fix" contract_id here because it was created with zero // given that we only have it as TyExpression, therefore can only use it after we type_check // `expr`` match &mut expr.expression { ty::TyExpressionVariant::FunctionApplication { arguments, contract_caller, .. } => { let selector = selector.unwrap(); arguments[0].1 = (*selector.contract_address).clone(); *contract_caller = Some(selector.contract_caller); } _ => unreachable!(), } return Ok(expr); } // Unify method type parameters with implementing type type parameters. 
if let Some(implementing_for) = method.implementing_for { if let Some(TyDecl::ImplSelfOrTrait(t)) = &method.implementing_type { let t = &engines.de().get(&t.decl_id).implementing_for; if let TypeInfo::Custom { type_arguments: Some(type_arguments), .. } = &*type_engine.get(t.initial_type_id) { // Method type parameters that have is_from_parent set to true use the base ident as defined in // in the impl trait. The type parameter name may be different in the Struct or Enum. // Thus we use the index in the Struct's or Enum's type parameter the impl trait type parameter // was used on. let mut names_index = HashMap::<Ident, usize>::new(); for (index, t_arg) in type_arguments.iter().enumerate() { if let TypeInfo::Custom { qualified_call_path, .. } = &*type_engine.get(t_arg.initial_type_id()) { names_index.insert(qualified_call_path.call_path.suffix.clone(), index); } } let implementing_type_parameters = implementing_for.get_type_parameters(engines); if let Some(implementing_type_parameters) = implementing_type_parameters { for p in method.type_parameters.clone() { let Some(p) = p.as_type_parameter() else { continue; }; if p.is_from_parent { if let Some(impl_type_param) = names_index.get(&p.name).and_then(|type_param_index| { implementing_type_parameters.get(*type_param_index) }) { let impl_type_param = impl_type_param .as_type_parameter() .expect("only works with type parameters"); handler.scope(|handler| { type_engine.unify_with_generic( handler, engines, p.type_id, impl_type_param.type_id, &call_path.span(), "Function type parameter does not match up with implementing type type parameter.", || None, ); Ok(()) })?; } } } } } } } let mut method_return_type_id = method.return_type.type_id; let method_ident: IdentUnique = method.name.clone().into(); let method_sig = TyFunctionSig::from_fn_decl(&method); if let Some(cached_fn_ref) = ctx.engines() .qe() .get_function(engines, &method_ident, method_sig.clone()) { fn_ref = cached_fn_ref; } else { if let 
Some(TyDecl::ImplSelfOrTrait(t)) = &method.implementing_type { let t = &engines.de().get(&t.decl_id).implementing_for; if let TypeInfo::Custom { qualified_call_path, type_arguments, } = &*type_engine.get(t.initial_type_id) { let mut subst_type_parameters = vec![]; let mut subst_type_arguments = vec![]; let mut names_type_ids = HashMap::<Ident, TypeId>::new(); if let Some(type_arguments) = type_arguments { for t_arg in type_arguments.iter() { if let TypeInfo::Custom { qualified_call_path, .. } = &*type_engine.get(t_arg.initial_type_id()) { names_type_ids.insert( qualified_call_path.call_path.suffix.clone(), t_arg.type_id(), ); } } } // This handles the case of substituting the generic blanket type by `method_target`. for p in method.type_parameters.iter() { if p.name().as_str() == qualified_call_path.call_path.suffix.as_str() { subst_type_parameters.push(t.initial_type_id); subst_type_arguments.push(method_target); break; } } // This will subst inner method_application placeholders with the already resolved // current method application type parameter for p in method .type_parameters .iter() .filter(|x| x.as_type_parameter().is_some()) { if names_type_ids.contains_key(p.name()) { let type_id = p .as_type_parameter() .expect("only works with type parameters") .type_id; subst_type_parameters.push(engines.te().new_placeholder(p.clone())); subst_type_arguments.push(type_id); } } let type_subst = TypeSubstMap::from_type_parameters_and_type_arguments( subst_type_parameters.into_iter(), subst_type_arguments.into_iter(), ); method.subst(&SubstTypesContext::new( handler, engines, &type_subst, !ctx.code_block_first_pass(), )); } } if !ctx.code_block_first_pass() { // Handle the trait constraints. This includes checking to see if the trait // constraints are satisfied and replacing old decl ids based on the // constraint with new decl ids based on the new type. 
let decl_mapping = GenericTypeParameter::gather_decl_mapping_from_trait_constraints( handler, ctx.by_ref(), &method.type_parameters, method.name.as_str(), &call_path.span(), ) .ok(); if let Some(decl_mapping) = decl_mapping { method.replace_decls(&decl_mapping, handler, &mut ctx)?; } } let method_sig = TyFunctionSig::from_fn_decl(&method); method_return_type_id = method.return_type.type_id; decl_engine.replace(*fn_ref.id(), method.clone()); if !ctx.code_block_first_pass() && method_sig.is_concrete(engines) && method.is_type_check_finalized && !method.is_trait_method_dummy { ctx.engines() .qe() .insert_function(engines, method_ident, method_sig, fn_ref.clone()); } } let expression = ty::TyExpressionVariant::FunctionApplication { call_path, arguments, fn_ref, selector, type_binding: Some(method_name_binding.strip_inner()), method_target: Some(method_target), contract_call_params: contract_call_params_map, contract_caller: None, }; let exp = ty::TyExpression { expression, return_type: method_return_type_id, span, }; Ok(exp) } pub(crate) fn prepare_const_generics_materialization<'a>( engines: &crate::Engines, mut args_types: impl Iterator<Item = TypeId>, mut param_types: impl Iterator<Item = TypeId>, mut type_parameters: impl Iterator<Item = &'a TypeParameter>, ) -> BTreeMap<String, TyExpression> { let mut const_generics = BTreeMap::new(); let has_const_generic_parameters = type_parameters.any(|x| matches!(x, TypeParameter::Const(_))); if has_const_generic_parameters { let a = engines.te().get(param_types.next().unwrap()); let b = engines.te().get(args_types.next().unwrap()); match (&*a, &*b) { ( TypeInfo::Array( _, Length(ConstGenericExpr::AmbiguousVariableExpression { ident, .. }), ), TypeInfo::Array(_, Length(ConstGenericExpr::Literal { val, .. })), ) => { const_generics.insert( ident.as_str().to_string(), TyExpression { expression: ty::TyExpressionVariant::Literal(Literal::U64(*val as u64)), return_type: engines.te().id_of_u64(), span: Span::dummy(), },
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/unsafe_downcast.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/unsafe_downcast.rs
use sway_types::Span; use crate::language::ty; /// Returns an [ty::TyExpressionVariant::UnsafeDowncast] expression that /// downcasts the expression `exp`, resulting in enum variant `variant`, /// to its underlying type. /// The expression `exp` **must** result in an enum variant `variant`. /// E.g., for `let a = MyEnum::A(u64, u32)` downcasting `a` to `MyEnum::A` /// will result in `a as (u64, u32)`. pub(crate) fn instantiate_enum_unsafe_downcast( exp: &ty::TyExpression, variant: ty::TyEnumVariant, call_path_decl: ty::TyDecl, span: Span, ) -> ty::TyExpression { ty::TyExpression { expression: ty::TyExpressionVariant::UnsafeDowncast { exp: Box::new(exp.clone()), variant: variant.clone(), call_path_decl, }, return_type: variant.type_argument.type_id, span, } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/struct_instantiation.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/struct_instantiation.rs
use std::collections::BTreeSet; use itertools::Itertools; use sway_error::{ error::{CompileError, StructFieldUsageContext}, handler::{ErrorEmitted, Handler}, }; use sway_types::{Ident, Span, Spanned}; use crate::{ decl_engine::DeclRefStruct, language::{ parsed::*, ty::{self, StructAccessInfo, TyStructField}, CallPath, Visibility, }, namespace::ResolvedTraitImplItem, semantic_analysis::{GenericShadowingMode, TypeCheckContext}, type_system::*, Engines, Namespace, }; const UNIFY_STRUCT_FIELD_HELP_TEXT: &str = "Struct field's type must match the type specified in its declaration."; pub(crate) fn struct_instantiation( handler: &Handler, mut ctx: TypeCheckContext, mut call_path_binding: TypeBinding<CallPath>, fields: &[StructExpressionField], span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let type_engine = ctx.engines.te(); let decl_engine = ctx.engines.de(); let engines = ctx.engines(); // We need the call_path_binding to have types that point to proper definitions so the LSP can // look for them, but its types haven't been resolved yet. // To that end we do a dummy type check which has the side effect of resolving the types. let _: Result<(DeclRefStruct, _, _), _> = TypeBinding::type_check(&mut call_path_binding, &Handler::default(), ctx.by_ref()); let TypeBinding { inner: CallPath { suffix, .. }, type_arguments, span: inner_span, } = &call_path_binding; if let TypeArgs::Prefix(_) = type_arguments { return Err( handler.emit_err(CompileError::DoesNotTakeTypeArgumentsAsPrefix { name: suffix.clone(), span: type_arguments.span(), }), ); } let type_arguments = type_arguments.to_vec(); // We first create a custom type and then resolve it to the struct type. 
let custom_type_id = match (suffix.as_str(), type_arguments.is_empty()) { ("Self", true) => type_engine.new_self_type(engines, suffix.span()), ("Self", false) => { return Err(handler.emit_err(CompileError::TypeArgumentsNotAllowed { span: suffix.span(), })); } (_, true) => type_engine.new_custom_from_name(engines, suffix.clone()), (_, false) => type_engine.new_custom(engines, suffix.clone().into(), Some(type_arguments)), }; // find the module that the struct decl is in let type_info_prefix = call_path_binding .inner .to_fullpath(engines, ctx.namespace()) .prefixes; ctx.namespace() .require_module_from_absolute_path(handler, &type_info_prefix)?; // resolve the type of the struct decl let type_id = ctx .resolve_type( handler, custom_type_id, inner_span, EnforceTypeArguments::No, Some(&type_info_prefix), ) .unwrap_or_else(|err| type_engine.id_of_error_recovery(err)); // extract the struct name and fields from the type info let type_info = type_engine.get(type_id); let struct_id = type_info.expect_struct(handler, engines, &span)?; let struct_decl = decl_engine.get_struct(&struct_id); let (struct_can_be_changed, is_public_struct_access) = StructAccessInfo::get_info(engines, &struct_decl, ctx.namespace()).into(); let struct_has_private_fields = struct_decl.has_private_fields(); let struct_can_be_instantiated = !is_public_struct_access || !struct_has_private_fields; let all_fields_are_private = struct_decl.has_only_private_fields(); let struct_is_empty = struct_decl.is_empty(); let struct_name = struct_decl.call_path.suffix.clone(); let struct_decl_span = struct_decl.span(); // Before we do the type check, let's first check for the field related errors (privacy issues, non-existing fields, ...). // These errors are independent of the type check, so we can collect all of them and then proceed with the type check. // To avoid conflicting and overlapping errors, we follow the Rust approach: // - Missing fields are reported only if the struct can actually be instantiated. 
// - Individual fields issues are always reported: private field access, non-existing fields. let struct_fields = &struct_decl.fields; if !struct_can_be_instantiated { let constructors = collect_struct_constructors( handler, ctx.namespace(), ctx.engines, type_id, ctx.storage_declaration(), ); handler.emit_err(CompileError::StructCannotBeInstantiated { struct_name: struct_name.clone(), span: inner_span.clone(), struct_decl_span: struct_decl.span.clone(), private_fields: struct_fields .iter() .filter(|field| field.is_private()) .map(|field| field.name.clone()) .collect(), constructors, all_fields_are_private, is_in_storage_declaration: ctx.storage_declaration(), struct_can_be_changed, }); } // Check that there are no duplicate fields. let mut seen_fields: BTreeSet<Ident> = BTreeSet::new(); for field in fields.iter() { if let Some(duplicate) = seen_fields.get(&field.name) { handler.emit_err(CompileError::StructFieldDuplicated { field_name: field.name.clone(), duplicate: duplicate.clone(), }); } seen_fields.insert(field.name.clone()); } // Check that there are no extra fields. for field in fields.iter() { if !struct_fields.iter().any(|x| x.name == field.name) { handler.emit_err(CompileError::StructFieldDoesNotExist { field_name: (&field.name).into(), // Explicit borrow to force the `From<&BaseIdent>` instead of `From<BaseIdent>`. available_fields: TyStructField::accessible_fields_names( struct_fields, is_public_struct_access, ), is_public_struct_access, struct_name: struct_name.clone(), struct_decl_span: struct_decl.span.clone(), struct_is_empty, usage_context: if ctx.storage_declaration() { StructFieldUsageContext::StorageDeclaration { struct_can_be_instantiated, } } else { StructFieldUsageContext::StructInstantiation { struct_can_be_instantiated, } }, }); } } // If the current module being checked is not a submodule of the // module in which the struct is declared, check for private fields usage. 
if is_public_struct_access { for field in fields.iter() { if let Some(ty_field) = struct_fields.iter().find(|x| x.name == field.name) { if ty_field.is_private() { handler.emit_err(CompileError::StructFieldIsPrivate { field_name: (&field.name).into(), struct_name: struct_name.clone(), field_decl_span: ty_field.name.span(), struct_can_be_changed, usage_context: if ctx.storage_declaration() { StructFieldUsageContext::StorageDeclaration { struct_can_be_instantiated, } } else { StructFieldUsageContext::StructInstantiation { struct_can_be_instantiated, } }, }); } } } } // Type check the fields and the struct. // If the context type annotation is a struct that can coerce into the struct to instantiate, // use the type coming from the context type annotation for type checking. // We do this to likely get a more specific type from the type annotation, although this must // not be the case. At the end, we will "merge" the struct type coming from the context and // from the struct to instantiate to cover cases like, e.g., this one: // // let _: Struct<u8, _, _> = Struct<_, bool, u32> { x: 123, y: true, z: 456 }; // // Not that, until we separate type checking and type inference phase, and do the inference // based on the overall scope, this is the best we can do to cover the largest variety of cases. // // If the context type annotation is not a struct that can coerce into the struct to instantiate, // take the struct type coming from the struct instantiation as the expected type. // This means that a type-mismatch error will be generated up the type-checking chain between // the instantiated struct type and the expected type, but the struct instantiation itself must // not necessarily be erroneous. (Examples are given below.) // // We also want to adjust the help message accordingly, depending where the type expectation is // coming from. // // E.g.: // let _: Struct<u8> = Struct { x: 123 }; // Ok. 
// let _: Struct<u8> = Struct { x: 123u64 }; // ^^^^^^ Expected `u8` found `u64`. // ^^^^^^ Must match **variable** declaration. // let _: Struct<u8> = Struct<bool> { x: true }; // ^^^^^^^^^^^^^^^^^^^^^^^^ Expected `Struct<u8>` found `Struct<bool>`. (But `true` is ok.) // ^^^^^^^^^^^^^^^^^^^^^^^^ Must match **variable** declaration. // let _: Struct<u8> = Struct<bool> { x: "not bool" }; // ^^^^^^^^^^ Expected `bool` found `str`. // ^^^^^^^^^^ Must match **struct** declaration. let context_expected_type_id = type_engine.get_unaliased_type_id(ctx.type_annotation()); let (is_context_type_used, type_check_struct_decl, help_text) = match &*type_engine.get(context_expected_type_id) { TypeInfo::Struct(decl_id) => { let context_expected_struct_decl = decl_engine.get_struct(decl_id); if UnifyCheck::coercion(engines) .check_structs(&context_expected_struct_decl, &struct_decl) { (true, context_expected_struct_decl, ctx.help_text()) } else { (false, struct_decl.clone(), UNIFY_STRUCT_FIELD_HELP_TEXT) } } _ => (false, struct_decl.clone(), UNIFY_STRUCT_FIELD_HELP_TEXT), }; let typed_fields = type_check_field_arguments( handler, ctx.by_ref(), &struct_name, fields, &type_check_struct_decl.fields, &span, &struct_decl_span, help_text, // Emit the missing fields error only if the struct can actually be instantiated. struct_can_be_instantiated, )?; // The above type check will unify the types behind the `type_check_struct_decl.fields` // and the resulting expression types coming from `fields`. // But if the struct coming from the context was used for the unification, we // still need to unify the resulting struct type. if is_context_type_used { // Let's unify just the struct fields first, to be able to locate the error // message to each individual initialization value, because that's where the issue is. unify_field_arguments_and_struct_fields( handler, ctx.engines(), &typed_fields, &struct_decl.fields, help_text, )?; // Then let's unify the struct types. 
// Note that, in this case, the type we are actually expecting is the `type_id` and the // type which was provided by the context is the one we see as received, because we did // the previous type unification based on that type. // Short-circuit if the unification fails, by checking if the scoped handler // has collected any errors. handler.scope(|handler| { type_engine.unify_with_generic( handler, engines, context_expected_type_id, type_id, &span, help_text, || None, ); Ok(()) })?; } let instantiation_span = inner_span.clone(); ctx.with_generic_shadowing_mode(GenericShadowingMode::Allow) .scoped(handler, None, |scoped_ctx| { // Insert struct type parameter into namespace. // This is required so check_type_parameter_bounds can resolve generic trait type parameters. for p in struct_decl.generic_parameters.iter() { p.insert_into_namespace_self(handler, scoped_ctx.by_ref())?; } type_id.check_type_parameter_bounds(handler, scoped_ctx.by_ref(), &span, None)?; let exp = ty::TyExpression { expression: ty::TyExpressionVariant::StructExpression { struct_id, fields: typed_fields, instantiation_span, call_path_binding, }, return_type: type_id, span, }; Ok(exp) }) } fn collect_struct_constructors( handler: &Handler, namespace: &Namespace, engines: &crate::Engines, struct_type_id: TypeId, is_in_storage_declaration: bool, ) -> Vec<String> { // Searching only for public constructors is a bit too restrictive because we can also have them in local private impls. // Checking that would be a questionable additional effort considering that this search gives good suggestions for // common patterns in which constructors can be found. // Also, strictly speaking, we could also have public module functions that create structs, // but that would be a way too much of suggestions, and moreover, it is also not a design pattern/guideline // that we wish to encourage. 
namespace.current_module().read(engines, |m| { let mut items = vec![]; m.append_items_for_type(engines, struct_type_id, &mut items); items .iter() .filter_map(|item| match item { ResolvedTraitImplItem::Parsed(_) => unreachable!(), ResolvedTraitImplItem::Typed(item) => match item { ty::TyTraitItem::Fn(fn_decl_id) => { Some(fn_decl_id.get_method_safe_to_unify(handler, engines, struct_type_id)) } _ => None, }, }) .map(|fn_decl_id| engines.de().get_function(&fn_decl_id)) .filter(|fn_decl| { matches!(fn_decl.visibility, Visibility::Public) && fn_decl .is_constructor(engines, struct_type_id) .unwrap_or_default() // For suggestions in storage declarations, we go for the simplest heuristics possible - // returning only parameterless constructors. Doing the const evaluation here would be // a questionable additional effort considering that this simple heuristics will give // us all the most common constructors like `default()` or `new()`. && (!is_in_storage_declaration || fn_decl.parameters.is_empty()) }) .map(|fn_decl| { // Removing the return type from the signature by searching for last `->` will work as long as we don't have something like `Fn`. format!("{}", engines.help_out((*fn_decl).clone())) .rsplit_once(" -> ") .unwrap() .0 .to_string() }) .sorted() .dedup() .collect_vec() }) } /// Type checks the field arguments. 
#[allow(clippy::too_many_arguments)] fn type_check_field_arguments( handler: &Handler, mut ctx: TypeCheckContext, struct_name: &Ident, fields: &[StructExpressionField], struct_fields: &[ty::TyStructField], span: &Span, struct_decl_span: &Span, help_text: &'static str, emit_missing_fields_error: bool, ) -> Result<Vec<ty::TyStructExpressionField>, ErrorEmitted> { handler.scope(|handler| { let type_engine = ctx.engines.te(); let mut typed_fields = vec![]; let mut missing_fields = vec![]; for struct_field in struct_fields.iter() { match fields.iter().find(|x| x.name == struct_field.name) { Some(field) => { let ctx = ctx .by_ref() .with_help_text(help_text) .with_type_annotation(struct_field.type_argument.type_id) .with_unify_generic(true); // TODO: Remove the `handler.scope` once https://github.com/FuelLabs/sway/issues/5606 gets solved. // We need it here so that we can short-circuit in case of a `TypeMismatch` error which is // not treated as an error in the `type_check()`'s result. let typed_expr = handler .scope(|handler| ty::TyExpression::type_check(handler, ctx, &field.value)); let value = match typed_expr { Ok(res) => res, Err(_) => continue, }; typed_fields.push(ty::TyStructExpressionField { value, name: field.name.clone(), }); } None => { missing_fields.push(struct_field.name.clone()); let err = Handler::default().emit_err( CompileError::StructInstantiationMissingFieldForErrorRecovery { field_name: struct_field.name.clone(), struct_name: struct_name.clone(), span: span.clone(), }, ); typed_fields.push(ty::TyStructExpressionField { name: struct_field.name.clone(), value: ty::TyExpression { expression: ty::TyExpressionVariant::Tuple { fields: vec![] }, return_type: type_engine.id_of_error_recovery(err), span: span.clone(), }, }); } } } if emit_missing_fields_error && !missing_fields.is_empty() { handler.emit_err(CompileError::StructInstantiationMissingFields { field_names: missing_fields, struct_name: struct_name.clone(), span: span.clone(), struct_decl_span: 
struct_decl_span.clone(), total_number_of_fields: struct_fields.len(), }); } Ok(typed_fields) }) } /// Unifies the field arguments and the types of the fields from the struct /// definition. fn unify_field_arguments_and_struct_fields( handler: &Handler, engines: &Engines, typed_fields: &[ty::TyStructExpressionField], struct_fields: &[ty::TyStructField], help_text: &str, ) -> Result<(), ErrorEmitted> { let type_engine = engines.te(); handler.scope(|handler| { for struct_field in struct_fields.iter() { if let Some(typed_field) = typed_fields.iter().find(|x| x.name == struct_field.name) { type_engine.unify_with_generic( handler, engines, typed_field.value.return_type, struct_field.type_argument.type_id, &typed_field.value.span, // Use the span of the initialization value. help_text, || None, ); } } Ok(()) }) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/enum_instantiation.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/enum_instantiation.rs
use crate::{ decl_engine::DeclRefEnum, language::{parsed::*, ty, CallPath}, semantic_analysis::*, type_system::*, }; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::{Ident, Spanned}; const UNIFY_ENUM_VARIANT_HELP_TEXT: &str = "Enum instantiator must match its declared variant type."; /// Given an enum declaration and the instantiation expression/type arguments, construct a valid /// [ty::TyExpression] of variant [ty::TyExpressionVariant::EnumInstantiation]. #[allow(clippy::too_many_arguments)] pub(crate) fn instantiate_enum( handler: &Handler, mut ctx: TypeCheckContext, enum_ref: DeclRefEnum, enum_variant_name: Ident, args_opt: Option<&[Expression]>, call_path_binding: TypeBinding<CallPath>, call_path_decl: ty::TyDecl, ) -> Result<ty::TyExpression, ErrorEmitted> { let type_engine = ctx.engines.te(); let decl_engine = ctx.engines.de(); let engines = ctx.engines(); let enum_decl = decl_engine.get_enum(&enum_ref); let enum_variant = enum_decl .expect_variant_from_name(handler, &enum_variant_name) .cloned()?; // Return an error if enum variant is of type unit and it is called with parenthesis. // `args_opt.is_some()` returns true when this variant was called with parenthesis. if type_engine .get(enum_variant.type_argument.initial_type_id) .is_unit() && args_opt.is_some() { return Err( handler.emit_err(CompileError::UnitVariantWithParenthesesEnumInstantiator { span: enum_variant_name.span(), ty: enum_variant.name.as_str().to_string(), }), ); } let args = args_opt.unwrap_or_default(); // If there is an instantiator, it must match up with the type. If there is not an // instantiator, then the type of the enum is necessarily the unit type. 
match (&args, &*type_engine.get(enum_variant.type_argument.type_id)) { ([], ty) if ty.is_unit() => Ok(ty::TyExpression { return_type: type_engine.insert_enum(engines, *enum_ref.id()), expression: ty::TyExpressionVariant::EnumInstantiation { tag: enum_variant.tag, contents: None, enum_ref, variant_name: enum_variant.name, variant_instantiation_span: enum_variant_name.span(), call_path_binding, call_path_decl, }, span: enum_variant_name.span(), }), ([single_expr], _) => { // If the context type annotation is an enum that can coerce into the enum to instantiate, // force `single_expr` to be of the enum variant type coming from the context type annotation, // We do this to likely get a more specific type from the type annotation, although this must // not be the case. At the end, we will "merge" the enum type coming from the context and // from the enum to instantiate to cover cases like, e.g., this one: // // let _: Enum<u8, _, _> = Enum::<_, bool, u32>::A(123); // // Not that, until we separate type checking and type inference phase, and do the inference // based on the overall scope, this is the best we can do to cover the largest variety of cases. // // If the context type annotation is not an enum that can coerce into the enum to instantiate, // take the enum variant type coming from the enum declaration as the expected type. // This means that a type-mismatch error will be generated up the type-checking chain between // the instantiated enum type and the expected type, but the enum instantiation itself must // not necessarily be erroneous. (Examples are given below.) // // We also want to adjust the help message accordingly, depending where the type expectation is // coming from. // // E.g.: // let _: Option<u8> = Option::Some(123); // Ok. // let _: Option<u8> = Option::Some(123u64); // ^^^^^^ Expected `u8` found `u64`. // ^^^^^^ Must match **variable** declaration. 
// let _: Option<u8> = Option::Some::<bool>(true); // ^^^^^^^^^^^^^^^^^^^^^^^^^^ Expected `Option<u8>` found `Option<bool>`. (But `true` is ok.) // ^^^^^^^^^^^^^^^^^^^^^^^^^^ Must match **variable** declaration. // let _: Option<u8> = Option::Some::<bool>("not bool"); // ^^^^^^^^^^ Expected `bool` found `str`. // ^^^^^^^^^^ Must match **enum** declaration. let context_expected_type_id = type_engine.get_unaliased_type_id(ctx.type_annotation()); let (is_context_type_used, enum_variant_type_id, help_text) = match &*type_engine.get(context_expected_type_id) { TypeInfo::Enum(e) => { let context_expected_enum_decl = decl_engine.get_enum(e); if UnifyCheck::coercion(engines) .check_enums(&context_expected_enum_decl, &enum_decl) { let context_expected_enum_variant = context_expected_enum_decl .expect_variant_from_name(handler, &enum_variant_name) .cloned()?; ( true, context_expected_enum_variant.type_argument.type_id, ctx.help_text(), ) } else { ( false, enum_variant.type_argument.type_id, UNIFY_ENUM_VARIANT_HELP_TEXT, ) } } _ => ( false, enum_variant.type_argument.type_id, UNIFY_ENUM_VARIANT_HELP_TEXT, ), }; let enum_ctx = ctx .by_ref() .with_help_text(help_text) .with_type_annotation(enum_variant_type_id); // TODO: Remove the `handler.scope` once https://github.com/FuelLabs/sway/issues/5606 gets solved. // We need it here so that we can short-circuit in case of a `TypeMismatch` error which is // not treated as an error in the `type_check()`'s result. let typed_expr = handler .scope(|handler| ty::TyExpression::type_check(handler, enum_ctx, single_expr))?; // Create the resulting enum type based on the enum we have instantiated. // Note that we clone the `enum_ref` but the unification we do below will // affect the types behind that new enum decl reference. let type_id = type_engine.insert_enum(engines, *enum_ref.id()); // The above type check will unify the type behind the `enum_variant_type_id` // and the resulting expression type. 
// But if the enum coming from the context was used for the unification, we // still need to unify the resulting enum type. if is_context_type_used { // Let's unify just the variant type first, to be able to locate the error // message to the instantiator, because that's where the issue is. // Short-circuit if the unification fails, by checking if the scoped handler // has collected any errors. handler.scope(|handler| { type_engine.unify( handler, engines, typed_expr.return_type, enum_variant.type_argument.type_id, &single_expr.span, // Use the span of the instantiator expression. help_text, || None, ); Ok(()) })?; // Then let's unify the enum types. // Note that, in this case, the type we are actually expecting is the `type_id` and the // type which was provided by the context is the one we see as received, because we did // the previous type unification based on that type. handler.scope(|handler| { type_engine.unify( handler, engines, context_expected_type_id, type_id, &enum_variant_name.span(), help_text, || None, ); Ok(()) })?; } type_id.check_type_parameter_bounds(handler, ctx, &enum_variant_name.span(), None)?; Ok(ty::TyExpression { return_type: type_id, expression: ty::TyExpressionVariant::EnumInstantiation { tag: enum_variant.tag, contents: Some(Box::new(typed_expr)), enum_ref, variant_name: enum_variant.name, variant_instantiation_span: enum_variant_name.span(), call_path_binding, call_path_decl, }, span: enum_variant_name.span(), }) } ([], _) => Err(handler.emit_err(CompileError::MissingEnumInstantiator { span: enum_variant_name.span(), })), (_too_many_expressions, ty) if ty.is_unit() => { Err(handler.emit_err(CompileError::UnnecessaryEnumInstantiator { span: enum_variant_name.span(), })) } (_too_many_expressions, ty) => { Err(handler.emit_err(CompileError::MoreThanOneEnumInstantiator { span: enum_variant_name.span(), ty: engines.help_out(ty).to_string(), })) } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/constant_expression.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/constant_expression.rs
use sway_types::Spanned; use crate::{ decl_engine::DeclRefConstant, language::{ty, CallPath}, semantic_analysis::TypeCheckContext, TypeBinding, }; pub(crate) fn instantiate_constant_expression( ctx: TypeCheckContext, const_ref: DeclRefConstant, call_path_binding: TypeBinding<CallPath>, ) -> ty::TyExpression { let const_decl = (*ctx.engines.de().get_constant(const_ref.id())).clone(); ty::TyExpression { return_type: const_decl.return_type, span: call_path_binding.span(), expression: ty::TyExpressionVariant::ConstantExpression { decl: Box::new(const_decl), span: call_path_binding.inner.suffix.span(), call_path: Some( call_path_binding .inner .to_fullpath(ctx.engines(), ctx.namespace()), ), }, } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/struct_field_access.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/struct_field_access.rs
use sway_error::{ error::{CompileError, StructFieldUsageContext}, handler::{ErrorEmitted, Handler}, }; use sway_types::{Ident, Span, Spanned}; use crate::{ language::ty::{self, StructAccessInfo}, Engines, Namespace, TypeInfo, }; pub(crate) fn instantiate_struct_field_access( handler: &Handler, engines: &Engines, namespace: &Namespace, parent: ty::TyExpression, field_to_access: Ident, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let type_engine = engines.te(); let mut current_prefix_te = Box::new(parent); let mut current_type = type_engine.get_unaliased(current_prefix_te.return_type); let prefix_type_id = current_prefix_te.return_type; let prefix_span = current_prefix_te.span.clone(); // Create the prefix part of the final struct field access expression. // This might be an expression that directly evaluates to a struct type, // or an arbitrary number of dereferencing expressions where the last one // dereferences to a struct type. // // We will either hit a struct at the end or return an error, so the // loop cannot be endless. while !current_type.is_struct() { match &*current_type { TypeInfo::Ref { referenced_type, .. 
} => { let referenced_type_id = referenced_type.type_id; current_prefix_te = Box::new(ty::TyExpression { expression: ty::TyExpressionVariant::Deref(current_prefix_te), return_type: referenced_type_id, span: prefix_span.clone(), }); current_type = type_engine.get_unaliased(referenced_type_id); } TypeInfo::ErrorRecovery(err) => return Err(*err), _ => { return Err(handler.emit_err(CompileError::FieldAccessOnNonStruct { actually: engines.help_out(prefix_type_id).to_string(), storage_variable: None, field_name: (&field_to_access).into(), span: prefix_span, })) } }; } let TypeInfo::Struct(struct_decl_ref) = &*current_type else { panic!("The current type must be a struct."); }; let decl = engines.de().get_struct(struct_decl_ref); let (struct_can_be_changed, is_public_struct_access) = StructAccessInfo::get_info(engines, &decl, namespace).into(); let field = match decl.find_field(&field_to_access) { Some(field) => { if is_public_struct_access && field.is_private() { return Err(handler.emit_err(CompileError::StructFieldIsPrivate { field_name: (&field_to_access).into(), struct_name: decl.call_path.suffix.clone(), field_decl_span: field.name.span(), struct_can_be_changed, usage_context: StructFieldUsageContext::StructFieldAccess, })); } field.clone() } None => { return Err(handler.emit_err(CompileError::StructFieldDoesNotExist { field_name: (&field_to_access).into(), available_fields: decl.accessible_fields_names(is_public_struct_access), is_public_struct_access, struct_name: decl.call_path.suffix.clone(), struct_decl_span: decl.span(), struct_is_empty: decl.is_empty(), usage_context: StructFieldUsageContext::StructFieldAccess, })); } }; let return_type = field.type_argument.type_id; Ok(ty::TyExpression { expression: ty::TyExpressionVariant::StructFieldAccess { resolved_type_of_parent: current_prefix_te.return_type, prefix: current_prefix_te, field_to_access: field, field_instantiation_span: field_to_access.span(), }, return_type, span, }) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/function_application.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/function_application.rs
use crate::{ decl_engine::{ engine::DeclEngineGetParsedDeclId, DeclEngineInsert, DeclRefFunction, ReplaceDecls, }, language::{ ty::{self, TyFunctionDecl, TyFunctionSig}, *, }, semantic_analysis::{ast_node::*, TypeCheckContext}, }; use ast_elements::type_parameter::GenericTypeParameter; use indexmap::IndexMap; use sway_error::error::CompileError; use sway_types::{IdentUnique, Spanned}; const UNIFY_ARGS_HELP_TEXT: &str = "The argument that has been provided to this function's type does \ not match the declared type of the parameter in the function \ declaration."; #[allow(clippy::too_many_arguments)] pub(crate) fn instantiate_function_application( handler: &Handler, mut ctx: TypeCheckContext, function_decl_ref: DeclRefFunction, call_path_binding: TypeBinding<CallPath>, arguments: Option<&[Expression]>, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let engines = ctx.engines(); let decl_engine = engines.de(); if arguments.is_none() { return Err( handler.emit_err(CompileError::MissingParenthesesForFunction { method_name: call_path_binding.inner.suffix.clone(), span: call_path_binding.inner.span(), }), ); } let function_decl = decl_engine.get_function(&function_decl_ref); let arguments = arguments.unwrap_or_default(); // check that the number of parameters and the number of the arguments is the same check_function_arguments_arity( handler, arguments.len(), &function_decl, &call_path_binding.inner, false, )?; let typed_arguments = type_check_arguments(handler, ctx.by_ref(), arguments, &function_decl.parameters)?; let typed_arguments_with_names = unify_arguments_and_parameters( handler, ctx.by_ref(), typed_arguments, &function_decl.parameters, )?; // unify function return type with current ctx.type_annotation(). 
engines.te().unify_with_generic( handler, engines, function_decl.return_type.type_id, ctx.type_annotation(), &call_path_binding.span(), "Function return type does not match up with local type annotation.", || None, ); let mut function_return_type_id = function_decl.return_type.type_id; let function_ident: IdentUnique = function_decl.name.clone().into(); let function_sig = TyFunctionSig::from_fn_decl(&function_decl); let new_decl_ref = if let Some(cached_fn_ref) = ctx.engines() .qe() .get_function(engines, &function_ident, function_sig.clone()) { cached_fn_ref } else { let mut function_decl = TyFunctionDecl::clone(&*function_decl); if !ctx.code_block_first_pass() { // Handle the trait constraints. This includes checking to see if the trait // constraints are satisfied and replacing old decl ids based on the // constraint with new decl ids based on the new type. let decl_mapping = GenericTypeParameter::gather_decl_mapping_from_trait_constraints( handler, ctx.by_ref(), &function_decl.type_parameters, function_decl.name.as_str(), &call_path_binding.span(), )?; function_decl.replace_decls(&decl_mapping, handler, &mut ctx)?; } let method_sig = TyFunctionSig::from_fn_decl(&function_decl); function_return_type_id = function_decl.return_type.type_id; let function_is_type_check_finalized = function_decl.is_type_check_finalized; let function_is_trait_method_dummy = function_decl.is_trait_method_dummy; let new_decl_ref = decl_engine .insert( function_decl, decl_engine .get_parsed_decl_id(function_decl_ref.id()) .as_ref(), ) .with_parent(decl_engine, (*function_decl_ref.id()).into()); if !ctx.code_block_first_pass() && method_sig.is_concrete(engines) && function_is_type_check_finalized && !function_is_trait_method_dummy { ctx.engines().qe().insert_function( engines, function_ident, method_sig, new_decl_ref.clone(), ); } new_decl_ref }; let exp = ty::TyExpression { expression: ty::TyExpressionVariant::FunctionApplication { call_path: call_path_binding.inner.clone(), arguments: 
typed_arguments_with_names, fn_ref: new_decl_ref, selector: None, type_binding: Some(call_path_binding.strip_inner()), method_target: None, contract_call_params: IndexMap::new(), contract_caller: None, }, return_type: function_return_type_id, span, }; Ok(exp) } /// Type checks the arguments. fn type_check_arguments( handler: &Handler, mut ctx: TypeCheckContext, arguments: &[parsed::Expression], parameters: &[ty::TyFunctionParameter], ) -> Result<Vec<ty::TyExpression>, ErrorEmitted> { let engines = ctx.engines(); // Sanity check before zipping arguments and parameters if arguments.len() != parameters.len() { return Err(handler.emit_err(CompileError::Internal( "Arguments and parameters length are not equal.", Span::dummy(), ))); } handler.scope(|handler| { let typed_arguments = arguments .iter() .zip(parameters) .map(|(arg, param)| { let ctx = ctx .by_ref() .with_help_text(UNIFY_ARGS_HELP_TEXT) .with_type_annotation(param.type_argument.type_id); ty::TyExpression::type_check(handler, ctx, arg) .unwrap_or_else(|err| ty::TyExpression::error(err, arg.span(), engines)) }) .collect(); Ok(typed_arguments) }) } /// Unifies the types of the arguments with the types of the parameters. Returns /// a list of the arguments with the names of the corresponding parameters. 
fn unify_arguments_and_parameters( handler: &Handler, ctx: TypeCheckContext, typed_arguments: Vec<ty::TyExpression>, parameters: &[ty::TyFunctionParameter], ) -> Result<Vec<(Ident, ty::TyExpression)>, ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); let mut typed_arguments_and_names = vec![]; handler.scope(|handler| { for (arg, param) in typed_arguments.into_iter().zip(parameters.iter()) { // unify the type of the argument with the type of the param let unify_res = handler.scope(|unify_handler| { type_engine.unify( unify_handler, engines, arg.return_type, param.type_argument.type_id, &arg.span, UNIFY_ARGS_HELP_TEXT, || None, ); Ok(()) }); if unify_res.is_err() { continue; } // check for matching mutability let param_mutability = ty::VariableMutability::new_from_ref_mut(param.is_reference, param.is_mutable); if arg.gather_mutability().is_immutable() && param_mutability.is_mutable() { handler.emit_err(CompileError::ImmutableArgumentToMutableParameter { span: arg.span.clone(), }); } typed_arguments_and_names.push((param.name.clone(), arg)); } Ok(typed_arguments_and_names) }) } pub(crate) fn check_function_arguments_arity( handler: &Handler, arguments_len: usize, function_decl: &ty::TyFunctionDecl, call_path: &CallPath, is_method_call_syntax_used: bool, ) -> Result<(), ErrorEmitted> { // if is_method_call_syntax_used then we have the guarantee // that at least the self argument is passed let (expected, received) = if is_method_call_syntax_used { (function_decl.parameters.len() - 1, arguments_len - 1) } else { (function_decl.parameters.len(), arguments_len) }; match expected.cmp(&received) { std::cmp::Ordering::Equal => Ok(()), std::cmp::Ordering::Less => { Err(handler.emit_err(CompileError::TooFewArgumentsForFunction { span: call_path.span(), method_name: function_decl.name.clone(), dot_syntax_used: is_method_call_syntax_used, expected, received, })) } std::cmp::Ordering::Greater => { 
Err(handler.emit_err(CompileError::TooManyArgumentsForFunction { span: call_path.span(), method_name: function_decl.name.clone(), dot_syntax_used: is_method_call_syntax_used, expected, received, })) } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/typed_expression/if_expression.rs
sway-core/src/semantic_analysis/ast_node/expression/typed_expression/if_expression.rs
use sway_error::{
    error::CompileError,
    handler::{ErrorEmitted, Handler},
    type_error::TypeError,
};
use sway_types::Span;

use crate::{language::ty, semantic_analysis::TypeCheckContext, type_system::*};

/// Builds a typed `if` expression from its already type-checked parts.
///
/// Unifies the branch types against the expected type annotation (or unit when
/// there is no `else` branch), taking care not to let a branch of type `Never`
/// force the other branch — or the annotation — to unify with `Never` (see the
/// guards below). Branch-mismatch errors are collected on a local handler first
/// so that a "missing else branch" situation can be reported with a dedicated
/// diagnostic instead of a generic type mismatch.
pub(crate) fn instantiate_if_expression(
    handler: &Handler,
    ctx: TypeCheckContext,
    condition: ty::TyExpression,
    then: ty::TyExpression,
    r#else: Option<ty::TyExpression>,
    span: Span,
) -> Result<ty::TyExpression, ErrorEmitted> {
    let type_engine = ctx.engines.te();
    let engines = ctx.engines();

    // Check the then block return type. With no `else`, the whole `if` must
    // evaluate to unit; otherwise it must match the surrounding annotation.
    let ty_to_check = if r#else.is_some() {
        ctx.type_annotation()
    } else {
        type_engine.id_of_unit()
    };

    // We check then_type_is_never and else_type_is_never before unifying to make sure we don't
    // unify ty_to_check with Never when another branch is not Never.
    let then_type_is_never = matches!(*type_engine.get(then.return_type), TypeInfo::Never);
    let else_type_is_never = r#else.is_some()
        && matches!(
            *type_engine.get(r#else.as_ref().unwrap().return_type),
            TypeInfo::Never
        );

    // Skip the unification only when `then` is `Never` but `else` exists and is
    // not `Never` — in that case the non-`Never` branch determines the type.
    if r#else.is_none() || !then_type_is_never || else_type_is_never {
        type_engine.unify(
            handler,
            engines,
            then.return_type,
            ty_to_check,
            &then.span,
            "`then` branch must return expected type.",
            || None,
        );
    }

    let r#else = r#else.map(|r#else| {
        // Symmetric guard: don't unify an `else` of type `Never` unless `then`
        // is `Never` too.
        if !else_type_is_never || then_type_is_never {
            // Check the else block return type
            type_engine.unify(
                handler,
                engines,
                r#else.return_type,
                ty_to_check,
                &r#else.span,
                "`else` branch must return expected type.",
                || None,
            );
        }
        Box::new(r#else)
    });

    // A missing `else` behaves as a unit-typed branch.
    let r#else_ret_ty = r#else
        .as_ref()
        .map(|x| x.return_type)
        .unwrap_or_else(|| type_engine.id_of_unit());

    // delay emitting the errors until we decide if this is a missing else branch or some other set of errors
    let h = Handler::default();
    let unify_check = UnifyCheck::coercion(engines);

    // Perform unify check in both ways as Never coercion is not commutative
    if !unify_check.check(then.return_type, r#else_ret_ty)
        && !unify_check.check(r#else_ret_ty, then.return_type)
    {
        h.emit_err(CompileError::TypeError(TypeError::MismatchedType {
            expected: engines.help_out(then.return_type).to_string(),
            received: engines.help_out(r#else_ret_ty).to_string(),
            help_text: "The two branches of an if expression must return the same type."
                .to_string(),
            span: span.clone(),
        }));
    }

    // Forward the delayed diagnostics to the real handler, replacing the
    // branch-mismatch errors with `NoElseBranch` when that is the actual cause.
    let (new_errors, new_warnings, new_infos) = h.consume();
    for info in new_infos {
        handler.emit_info(info);
    }
    for warn in new_warnings {
        handler.emit_warn(warn);
    }
    if new_errors.is_empty() {
        if !type_engine.get(r#else_ret_ty).is_unit() && r#else.is_none() {
            handler.emit_err(CompileError::NoElseBranch {
                span: span.clone(),
                r#type: engines.help_out(ctx.type_annotation()).to_string(),
            });
        }
    } else {
        for err in new_errors {
            handler.emit_err(err);
        }
    }

    // The expression's type is the `then` branch's type, unless that is
    // `Never`, in which case the `else` branch's type is used.
    let return_type = if !matches!(*type_engine.get(then.return_type), TypeInfo::Never) {
        then.return_type
    } else {
        r#else_ret_ty
    };

    let exp = ty::TyExpression {
        expression: ty::TyExpressionVariant::IfExp {
            condition: Box::new(condition),
            then: Box::new(then.clone()),
            r#else,
        },
        return_type,
        span,
    };
    Ok(exp)
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/mod.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/mod.rs
mod analysis; mod typed; pub(crate) use analysis::*;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/reachable_report.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/reachable_report.rs
use crate::language::ty; pub(crate) struct ReachableReport { pub(crate) reachable: bool, pub(crate) scrutinee: ty::TyScrutinee, } impl ReachableReport { pub(super) fn new(reachable: bool, scrutinee: ty::TyScrutinee) -> ReachableReport { ReachableReport { reachable, scrutinee, } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/range.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/range.rs
use std::{
    cmp::Ordering,
    fmt::{self, Write},
    ops::Sub,
};

use crate::CompileError;

use itertools::Itertools;
use sway_error::handler::{ErrorEmitted, Handler};
use sway_types::Span;

/// Minimal integer arithmetic needed by [Range]: type-level min/max and
/// checked-by-convention successor/predecessor.
///
/// NOTE(review): `incr`/`decr` use plain `+ 1` / `- 1` and will panic (in
/// debug) or wrap (in release) at the type bounds; callers appear to guard
/// against calling them at `global_max`/`global_min` — confirm.
pub(crate) trait MyMath<T> {
    fn global_max() -> T;
    fn global_min() -> T;
    fn incr(&self) -> T;
    fn decr(&self) -> T;
}

impl MyMath<u8> for u8 {
    fn global_max() -> u8 {
        u8::MAX
    }

    fn global_min() -> u8 {
        u8::MIN
    }

    fn incr(&self) -> u8 {
        self + 1
    }

    fn decr(&self) -> u8 {
        self - 1
    }
}

impl MyMath<u16> for u16 {
    fn global_max() -> u16 {
        u16::MAX
    }

    fn global_min() -> u16 {
        u16::MIN
    }

    fn incr(&self) -> u16 {
        self + 1
    }

    fn decr(&self) -> u16 {
        self - 1
    }
}

impl MyMath<u32> for u32 {
    fn global_max() -> u32 {
        u32::MAX
    }

    fn global_min() -> u32 {
        u32::MIN
    }

    fn incr(&self) -> u32 {
        self + 1
    }

    fn decr(&self) -> u32 {
        self - 1
    }
}

impl MyMath<u64> for u64 {
    fn global_max() -> u64 {
        u64::MAX
    }

    fn global_min() -> u64 {
        u64::MIN
    }

    fn incr(&self) -> u64 {
        self + 1
    }

    fn decr(&self) -> u64 {
        self - 1
    }
}

/// A `Range<T>` is a range of values of type T. Given this range:
///
/// ```ignore
/// Range {
///     first: 0,
///     last: 3
/// }
/// ```
///
/// This represents the inclusive range `[0, 3]`. (Where '[' and ']' represent
/// inclusive contains.) More specifically: it is equivalent to `0, 1, 2, 3`.
///
/// ---
///
/// `Range<T>`s are only useful in cases in which `T` is an integer. AKA when
/// `T` has discrete values. Because Sway does not have floats, this means that
/// `Range<T>` can be used for all numeric and integer Sway types.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct Range<T>
where
    T: fmt::Debug
        + fmt::Display
        + Eq
        + Ord
        + PartialEq
        + PartialOrd
        + Clone
        + MyMath<T>
        + Sub<Output = T>
        + Into<u64>,
{
    first: T,
    last: T,
}

impl Range<u8> {
    /// The full `u8` domain, `[u8::MIN, u8::MAX]`.
    pub(crate) fn u8() -> Range<u8> {
        Range {
            first: u8::MIN,
            last: u8::MAX,
        }
    }
}

impl Range<u16> {
    /// The full `u16` domain, `[u16::MIN, u16::MAX]`.
    pub(crate) fn u16() -> Range<u16> {
        Range {
            first: u16::MIN,
            last: u16::MAX,
        }
    }
}

impl Range<u32> {
    /// The full `u32` domain, `[u32::MIN, u32::MAX]`.
    pub(crate) fn u32() -> Range<u32> {
        Range {
            first: u32::MIN,
            last: u32::MAX,
        }
    }
}

impl Range<u64> {
    /// The full `u64` domain, `[u64::MIN, u64::MAX]`.
    pub(crate) fn u64() -> Range<u64> {
        Range {
            first: u64::MIN,
            last: u64::MAX,
        }
    }
}

impl<T> Range<T>
where
    T: fmt::Debug
        + fmt::Display
        + Eq
        + Ord
        + PartialEq
        + PartialOrd
        + Clone
        + MyMath<T>
        + Sub<Output = T>
        + Into<u64>,
{
    /// Creates a `Range<T>` from a single value of type `T`, where the value is used
    /// both as the lower inclusive contains and the upper inclusive contains.
    pub(crate) fn from_single(x: T) -> Range<T> {
        Range {
            first: x.clone(),
            last: x,
        }
    }

    /// Creates a `Range<T>` and ensures that it is a "valid `Range<T>`"
    /// (i.e.) that `first` is <= to `last`
    fn from_double(
        handler: &Handler,
        first: T,
        last: T,
        span: &Span,
    ) -> Result<Range<T>, ErrorEmitted> {
        if last < first {
            Err(handler.emit_err(CompileError::Internal(
                "attempted to create an invalid range",
                span.clone(),
            )))
        } else {
            Ok(Range { first, last })
        }
    }

    /// Combines two ranges that overlap. There are 6 ways
    /// in which this might be the case:
    ///
    /// ```ignore
    /// A: |------------|
    /// B:    |------|
    /// -> |------------|
    ///
    /// A:    |------|
    /// B: |------------|
    /// -> |------------|
    ///
    /// A: |---------|
    /// B:      |---------|
    /// -> |--------------|
    ///
    /// A:      |---------|
    /// B: |---------|
    /// -> |--------------|
    ///
    /// A: |------|
    /// B:         |------|
    /// -> |--------------|
    ///
    /// A:         |------|
    /// B: |------|
    /// -> |--------------|
    /// ```
    ///
    /// ---
    ///
    /// Note that because `Range<T>` relies on the assumption that `T` is an
    /// integer value, this algorithm joins `Range<T>`s that are within ± 1 of
    /// one another. Given these two `Range<T>`s:
    ///
    /// ```ignore
    /// Range {
    ///     first: 0,
    ///     last: 3
    /// }
    /// Range {
    ///     first: 4,
    ///     last: 7
    /// }
    /// ```
    ///
    /// They can be joined into this `Range<T>`:
    ///
    /// ```ignore
    /// Range {
    ///     first: 0,
    ///     last: 7
    /// }
    /// ```
    fn join_ranges(
        handler: &Handler,
        a: &Range<T>,
        b: &Range<T>,
        span: &Span,
    ) -> Result<Range<T>, ErrorEmitted> {
        if !a.overlaps(b) && !a.within_one(b) {
            Err(handler.emit_err(CompileError::Internal(
                "these two ranges cannot be joined",
                span.clone(),
            )))
        } else {
            // The join covers from the smaller `first` to the larger `last`.
            let first = if a.first < b.first {
                a.first.clone()
            } else {
                b.first.clone()
            };
            let last = if a.last > b.last {
                a.last.clone()
            } else {
                b.last.clone()
            };
            let range = Range::from_double(handler, first, last, span)?;
            Ok(range)
        }
    }

    /// Condenses a `Vec<Range<T>>` to a `Vec<Range<T>>` of ordered, distinct,
    /// non-overlapping ranges.
    ///
    /// Modeled after the algorithm here: https://www.geeksforgeeks.org/merging-intervals/
    ///
    /// 1. Sort the intervals based on increasing order of starting time.
    /// 2. Push the first interval on to a stack.
    /// 3. For each interval do the following
    ///    3a. If the current interval does not overlap with the stack
    ///        top, push it.
    ///    3b. If the current interval overlaps with stack top (or is within ± 1)
    ///        and ending time of current interval is more than that of stack top,
    ///        update stack top with the ending time of current interval.
    /// 4. At the end stack contains the merged intervals.
    ///
    /// NOTE: this implementation actually sorts in *decreasing* order of
    /// starting value and reverses the stack at the end, which is equivalent
    /// and yields the stack in ascending order.
    fn condense_ranges(
        handler: &Handler,
        ranges: Vec<Range<T>>,
        span: &Span,
    ) -> Result<Vec<Range<T>>, ErrorEmitted> {
        let mut ranges = ranges;
        let mut stack: Vec<Range<T>> = vec![];

        // 1. Sort the intervals (in decreasing order of starting value; see
        //    the NOTE in the doc comment above).
        ranges.sort_by(|a, b| b.first.cmp(&a.first));

        // 2. Push the first interval on to a stack.
        let (first, rest) = match ranges.split_first() {
            Some((first, rest)) => (first.to_owned(), rest.to_owned()),
            None => {
                return Err(
                    handler.emit_err(CompileError::Internal("unable to split vec", span.clone()))
                );
            }
        };
        stack.push(first);

        for range in rest.iter() {
            let top = match stack.pop() {
                Some(top) => top,
                None => {
                    return Err(
                        handler.emit_err(CompileError::Internal("stack empty", span.clone()))
                    );
                }
            };
            if range.overlaps(&top) || range.within_one(&top) {
                // 3b. If the current interval overlaps with stack top (or is within ± 1)
                // and ending time of current interval is more than that of stack top,
                // update stack top with the ending time of current interval.
                stack.push(Range::join_ranges(handler, range, &top, span)?);
            } else {
                // 3a. If the current interval does not overlap with the stack
                // top, push it.
                stack.push(top);
                stack.push(range.clone());
            }
        }
        // Restore ascending order (the input was processed in descending order).
        stack.reverse();
        Ok(stack)
    }

    /// Given an *oracle* `Range<T>` and a vec *guides* of `Range<T>`, this
    /// function returns the subdivided `Range<T>`s that are both within
    /// *oracle* not within *guides*.
    ///
    /// The steps are as follows:
    ///
    /// 1. Convert *guides* to a vec of ordered, distinct, non-overlapping
    ///    ranges *guides*'
    /// 2. Check to ensure that *oracle* fully encompasses all ranges in
    ///    *guides*'. For example, this would pass the check:
    ///    ```ignore
    ///    oracle: |--------------|
    ///    guides:   |--|  |--|
    ///    ```
    ///    But this would not:
    ///    ```ignore
    ///    oracle: |--------------|
    ///    guides:   |--|  |--|  |---|
    ///    ```
    /// 3. Given the *oracle* range `[a, b]` and the *guides*'₀ range of
    ///    `[c, d]`, and `a != c`, construct a range of `[a, c]`.
    /// 4. Given *guides*' of length *n*, for every *k* 0..*n-1*, find the
    ///    *guides*'ₖ range of `[a,b]` and the *guides*'ₖ₊₁ range of `[c, d]`,
    ///    construct a range of `[b, c]`. You can assume that `b != d` because
    ///    of step (1)
    /// 5. Given the *oracle* range of `[a, b]`, *guides*' of length *n*, and
    ///    the *guides*'ₙ range of `[c, d]`, and `b != d`, construct a range of
    ///    `[b, d]`.
    /// 6. Combine the range given from step (3), the ranges given from step
    ///    (4), and the range given from step (5) for your result.
    pub(crate) fn find_exclusionary_ranges(
        handler: &Handler,
        guides: Vec<Range<T>>,
        oracle: Range<T>,
        span: &Span,
    ) -> Result<Vec<Range<T>>, ErrorEmitted> {
        // 1. Convert *guides* to a vec of ordered, distinct, non-overlapping
        // ranges *guides*'
        let condensed = Range::condense_ranges(handler, guides, span)?;

        // 2. Check to ensure that *oracle* fully encompasses all ranges in
        // *guides*'.
        if !oracle.encompasses_all(&condensed) {
            return Err(handler.emit_err(CompileError::Internal(
                "ranges OOB with the oracle",
                span.clone(),
            )));
        }

        // 3. Given the *oracle* range `[a, b]` and the *guides*'₀ range of
        // `[c, d]`, and `a != c`, construct a range of `[a, c]`.
        let mut exclusionary = vec![];
        let (first, last) = match (condensed.split_first(), condensed.split_last()) {
            (Some((first, _)), Some((last, _))) => (first, last),
            _ => {
                return Err(
                    handler.emit_err(CompileError::Internal("could not split vec", span.clone()))
                );
            }
        };
        if oracle.first != first.first {
            exclusionary.push(Range::from_double(
                handler,
                oracle.first.clone(),
                first.first.decr(),
                span,
            )?);
        }

        // 4. Given *guides*' of length *n*, for every *k* 0..*n-1*, find the
        // *guides*'ₖ range of `[a,b]` and the *guides*'ₖ₊₁ range of `[c, d]`,
        // construct a range of `[b, c]`. You can assume that `b != d` because
        // of step (1)
        for (left, right) in condensed.iter().tuple_windows() {
            exclusionary.push(Range::from_double(
                handler,
                left.last.incr(),
                right.first.decr(),
                span,
            )?);
        }

        // 5. Given the *oracle* range of `[a, b]`, *guides*' of length *n*, and
        // the *guides*'ₙ range of `[c, d]`, and `b != d`, construct a range of
        // `[b, d]`.
        if oracle.last != last.last {
            exclusionary.push(Range::from_double(
                handler,
                last.last.incr(),
                oracle.last,
                span,
            )?);
        }

        // 6. Combine the range given from step (3), the ranges given from step
        // (4), and the range given from step (5) for your result.
        Ok(exclusionary)
    }

    /// Condenses a vec of ranges and checks to see if the condensed ranges
    /// equal an oracle range.
    pub(crate) fn do_ranges_equal_range(
        handler: &Handler,
        ranges: Vec<Range<T>>,
        oracle: Range<T>,
        span: &Span,
    ) -> Result<bool, ErrorEmitted> {
        let condensed_ranges = Range::condense_ranges(handler, ranges, span)?;
        if condensed_ranges.len() > 1 {
            // More than one disjoint condensed range can never equal a single
            // oracle range.
            Ok(false)
        } else {
            let first_range = match condensed_ranges.first() {
                Some(first_range) => first_range.clone(),
                _ => {
                    return Err(handler.emit_err(CompileError::Internal("vec empty", span.clone())));
                }
            };
            Ok(first_range == oracle)
        }
    }

    /// Checks to see if two ranges overlap. There are 4 ways in which this
    /// might be the case:
    ///
    /// ```ignore
    /// A: |------------|
    /// B:    |------|
    ///
    /// A:    |------|
    /// B: |------------|
    ///
    /// A: |---------|
    /// B:      |---------|
    ///
    /// A:      |---------|
    /// B: |---------|
    /// ```
    fn overlaps(&self, other: &Range<T>) -> bool {
        other.first >= self.first && other.last <= self.last
            || other.first <= self.first && other.last >= self.last
            || other.first <= self.first && other.last <= self.last && other.last >= self.first
            || other.first >= self.first && other.first <= self.last && other.last >= self.last
    }

    /// Checks to see if the first range encompasses the second range. There are
    /// 2 ways in which this might be the case:
    ///
    /// ```ignore
    /// A: |------------|
    /// B:    |------|
    ///
    /// A: |------------|
    /// B: |------------|
    /// ```
    fn encompasses(&self, other: &Range<T>) -> bool {
        self.first <= other.first && self.last >= other.last
    }

    fn encompasses_all(&self, others: &[Range<T>]) -> bool {
        others.iter().all(|other| self.encompasses(other))
    }

    /// Checks to see if two ranges are within ± 1 of one another. There are 2
    /// ways in which this might be the case:
    ///
    /// ```ignore
    /// A: |------|
    /// B:         |------|
    ///
    /// A:         |------|
    /// B: |------|
    /// ```
    fn within_one(&self, other: &Range<T>) -> bool {
        !self.overlaps(other)
            && (other.first > self.last
                && (other.first.clone() - self.last.clone()).into() == 1u64
                || self.first > other.last
                    && (self.first.clone() - other.last.clone()).into() == 1u64)
    }
}

impl<T> fmt::Display for Range<T>
where
    T: fmt::Debug
        + fmt::Display
        + Eq
        + Ord
        + PartialEq
        + PartialOrd
        + Clone
        + MyMath<T>
        + Sub<Output = T>
        + Into<u64>,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = String::new();
        // Because [Range]s represent the `[n, m]` (fully) inclusive 'contains',
        // it is entirely possible (and normal) for the occasional [Range] to
        // have the same `first` and `last`. For example, if the user is
        // matching on `u64` values and specifies a match arm for `2` but does
        // not specify a match arm for `1`, then this would otherwise display as
        // `[MIN...1]`. While not incorrect, it looks kind of weird. So instead
        // we bypass this problem when displaying [Range]s.
        if self.first == self.last {
            write!(builder, "{}", self.first)?;
            return write!(f, "{builder}");
        }
        builder.push('[');
        if self.first == T::global_min() {
            builder.push_str("MIN");
        } else {
            write!(builder, "{}", self.first)?;
        }
        builder.push_str("...");
        if self.last == T::global_max() {
            builder.push_str("MAX");
        } else {
            write!(builder, "{}", self.last)?;
        }
        builder.push(']');
        write!(f, "{builder}")
    }
}

/// Checks to see if two ranges are greater than or equal to one another.
///
/// NOTE(review): ordering is primarily by `first`; for equal `first`s, a range
/// with a smaller `last` sorts before one with a larger `last`.
impl<T> std::cmp::Ord for Range<T>
where
    T: fmt::Debug
        + fmt::Display
        + Eq
        + Ord
        + PartialEq
        + PartialOrd
        + Clone
        + MyMath<T>
        + Sub<Output = T>
        + Into<u64>,
{
    fn cmp(&self, other: &Self) -> Ordering {
        use Ordering::*;
        match (self.first.cmp(&other.first), self.last.cmp(&other.last)) {
            (Less, Less) => Less,
            (Less, Equal) => Less,
            (Less, Greater) => Less,
            (Equal, Less) => Less,
            (Equal, Equal) => Equal,
            (Equal, Greater) => Greater,
            (Greater, Less) => Greater,
            (Greater, Equal) => Greater,
            (Greater, Greater) => Greater,
        }
    }
}

impl<T> std::cmp::PartialOrd for Range<T>
where
    T: fmt::Debug
        + fmt::Display
        + Eq
        + Ord
        + PartialEq
        + PartialOrd
        + Clone
        + MyMath<T>
        + Sub<Output = T>
        + Into<u64>,
{
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/pattern.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/pattern.rs
use std::{cmp::Ordering, fmt}; use std::fmt::Write; use sway_error::error::CompileError; use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::Span; use crate::decl_engine::DeclEngine; use crate::{language::ty, language::Literal, TypeInfo}; use super::{patstack::PatStack, range::Range}; /// A `Pattern` represents something that could be on the LHS of a match /// expression arm. /// /// For instance this match expression: /// /// ```ignore /// let x = (0, 5); /// match x { /// (0, 1) => true, /// (2, 3) => true, /// _ => false /// } /// ``` /// /// would result in these patterns: /// /// ```ignore /// Pattern::Tuple([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ]) /// Pattern::Tuple([ /// Pattern::U64(Range { first: 2, last: 2 }), /// Pattern::U64(Range { first: 3, last: 3 }) /// ]) /// Pattern::Wildcard /// ``` /// /// --- /// /// A `Pattern` is semantically constructed from a "constructor" and its /// "arguments." Given the `Pattern`: /// /// ```ignore /// Pattern::Tuple([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ]) /// ``` /// /// the constructor is: /// /// ```ignore /// Pattern::Tuple([.., ..]) /// ``` /// /// and the arguments are: /// /// ```ignore /// [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ] /// ``` /// /// Given the `Pattern`: /// /// ```ignore /// Pattern::U64(Range { first: 0, last: 0 }) /// ``` /// /// the constructor is: /// /// ```ignore /// Pattern::U64(Range { first: 0, last: 0 }) /// ``` /// and the arguments are empty. More specifically, in the case of u64 (and /// other numbers), we can think of u64 as a giant enum, where every u64 value /// is one variant of the enum, and each of these variants maps to a `Pattern`. /// So "2u64" can be mapped to a `Pattern` with the constructor "2u64" /// (represented as a `Range<u64>`) and with empty arguments. 
/// /// This idea of a constructor and arguments is used in the match exhaustivity /// algorithm. /// /// --- /// /// The variants of `Pattern` can be semantically categorized into 3 categories: /// /// 1. the wildcard pattern (Pattern::Wildcard) /// 2. the or pattern (Pattern::Or(..)) /// 3. constructed patterns (everything else) /// /// This idea of semantic categorization is used in the match exhaustivity /// algorithm. #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) enum Pattern { Wildcard, U8(Range<u8>), U16(Range<u16>), U32(Range<u32>), U64(Range<u64>), B256([u8; 32]), Boolean(bool), Numeric(Range<u64>), String(String), Struct(StructPattern), Enum(EnumPattern), Tuple(PatStack), Or(PatStack), } impl Pattern { /// Converts a `Scrutinee` to a `Pattern`. pub(crate) fn from_scrutinee(scrutinee: ty::TyScrutinee) -> Self { let pat = match scrutinee.variant { ty::TyScrutineeVariant::CatchAll => Pattern::Wildcard, ty::TyScrutineeVariant::Variable(_) => Pattern::Wildcard, ty::TyScrutineeVariant::Literal(value) => Pattern::from_literal(value), ty::TyScrutineeVariant::Constant(_, value, _) => Pattern::from_literal(value), ty::TyScrutineeVariant::StructScrutinee { struct_ref, fields, instantiation_call_path: _, } => { let mut new_fields = vec![]; for field in fields.into_iter() { let f = match field.scrutinee { Some(scrutinee) => Pattern::from_scrutinee(scrutinee), None => Pattern::Wildcard, }; new_fields.push((field.field.as_str().to_string(), f)); } Pattern::Struct(StructPattern { struct_name: struct_ref.name().to_string(), fields: new_fields, }) } ty::TyScrutineeVariant::Or(elems) => { let mut new_elems = PatStack::empty(); for elem in elems.into_iter() { new_elems.push(Pattern::from_scrutinee(elem)); } Pattern::Or(new_elems) } ty::TyScrutineeVariant::Tuple(elems) => { let mut new_elems = PatStack::empty(); for elem in elems.into_iter() { new_elems.push(Pattern::from_scrutinee(elem)); } Pattern::Tuple(new_elems) } ty::TyScrutineeVariant::EnumScrutinee { enum_ref, 
variant, value, .. } => Pattern::Enum(EnumPattern { enum_name: enum_ref.name().to_string(), variant_name: variant.name.to_string(), value: Box::new(Pattern::from_scrutinee(*value)), }), }; pat } /// Convert the given literal `value` into a pattern. fn from_literal(value: Literal) -> Pattern { match value { Literal::U8(x) => Pattern::U8(Range::from_single(x)), Literal::U16(x) => Pattern::U16(Range::from_single(x)), Literal::U32(x) => Pattern::U32(Range::from_single(x)), Literal::U64(x) => Pattern::U64(Range::from_single(x)), Literal::U256(x) => Pattern::U64(Range::from_single( x.try_into().expect("pattern only works with 64 bits"), )), Literal::B256(x) => Pattern::B256(x), Literal::Boolean(b) => Pattern::Boolean(b), Literal::Numeric(x) => Pattern::Numeric(Range::from_single(x)), Literal::String(s) => Pattern::String(s.as_str().to_string()), Literal::Binary(_) => { unreachable!("literals cannot be expressed in the language yet") } } } /// Converts a `PatStack` to a `Pattern`. If the `PatStack` is of length 1, /// this function returns the single element, if it is of length > 1, this /// function wraps the provided `PatStack` in a `Pattern::Or(..)`. pub(crate) fn from_pat_stack( handler: &Handler, pat_stack: PatStack, span: &Span, ) -> Result<Pattern, ErrorEmitted> { if pat_stack.len() == 1 { pat_stack.first(handler, span) } else { Ok(Pattern::Or(pat_stack)) } } /// Given a `Pattern` *c* and a `PatStack` *args*, extracts the constructor /// from *c* and applies it to *args*. 
For example, given: /// /// ```ignore /// c: Pattern::Tuple([ /// Pattern::U64(Range { first: 5, last: 7, }), /// Pattern::U64(Range { first: 10, last: 12 }) /// ]) /// args: [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ] /// ``` /// /// the extracted constructor *ctor* from *c* would be: /// /// ```ignore /// Pattern::Tuple([.., ..]) /// ``` /// /// Applying *args* to *ctor* would give: /// /// ```ignore /// Pattern::Tuple([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ]) /// ``` /// /// --- /// /// If it is the case that at lease one element of *args* is a /// or-pattern, then *args* is first "serialized". Meaning, that all /// or-patterns are extracted to create a vec of `PatStack`s *args*' where /// each `PatStack` is a copy of *args* where the index of the or-pattern is /// instead replaced with one element from the or-patterns contents. More /// specifically, given an *args* with one or-pattern that contains n /// elements, this "serialization" would result in *args*' of length n. /// Given an *args* with two or-patterns that contain n elements and m /// elements, this would result in *args*' of length n*m. /// /// Once *args*' is constructed, *ctor* is applied to every element of /// *args*' and the resulting `Pattern`s are wrapped inside of an /// or-pattern. 
/// /// For example, given: /// /// ```ignore /// ctor: Pattern::Tuple([.., ..]) /// args: [ /// Pattern::Or([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ]), /// Pattern::Wildcard /// ] /// ``` /// /// *args* would serialize to: /// /// ```ignore /// [ /// [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::Wildcard /// ], /// [ /// Pattern::U64(Range { first: 1, last: 1 }), /// Pattern::Wildcard /// ] /// ] /// ``` /// /// applying *ctor* would create: /// /// ```ignore /// [ /// Pattern::Tuple([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::Wildcard /// ]), /// Pattern::Tuple([ /// Pattern::U64(Range { first: 1, last: 1 }), /// Pattern::Wildcard /// ]), /// ] /// ``` /// /// and wrapping this in an or-pattern would create: /// /// ```ignore /// Pattern::Or([ /// Pattern::Tuple([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::Wildcard /// ]), /// Pattern::Tuple([ /// Pattern::U64(Range { first: 1, last: 1 }), /// Pattern::Wildcard /// ]), /// ]) /// ``` pub(crate) fn from_constructor_and_arguments( handler: &Handler, c: &Pattern, args: PatStack, span: &Span, ) -> Result<Self, ErrorEmitted> { let pat = match c { Pattern::Wildcard => { if !args.is_empty() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::Wildcard } Pattern::U8(range) => { if !args.is_empty() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::U8(range.clone()) } Pattern::U16(range) => { if !args.is_empty() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::U16(range.clone()) } Pattern::U32(range) => { if !args.is_empty() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::U32(range.clone()) } Pattern::U64(range) => { if !args.is_empty() { return 
Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::U64(range.clone()) } Pattern::B256(b) => { if !args.is_empty() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::B256(*b) } Pattern::Boolean(b) => { if !args.is_empty() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::Boolean(*b) } Pattern::Numeric(range) => { if !args.is_empty() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::Numeric(range.clone()) } Pattern::String(s) => { if !args.is_empty() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } Pattern::String(s.clone()) } Pattern::Struct(struct_pattern) => { if args.len() != struct_pattern.fields.len() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } let pats: PatStack = args .serialize_multi_patterns(handler, span)? .into_iter() .map(|args| { Pattern::Struct(StructPattern { struct_name: struct_pattern.struct_name.clone(), fields: struct_pattern .fields .iter() .zip(args) .map(|((name, _), arg)| (name.clone(), arg)) .collect::<Vec<_>>(), }) }) .collect::<Vec<_>>() .into(); Pattern::from_pat_stack(handler, pats, span)? } Pattern::Enum(enum_pattern) => { if args.len() != 1 { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } let serialized_args = args.serialize_multi_patterns(handler, span)?; let mut pats: PatStack = PatStack::empty(); for args in serialized_args.into_iter() { let arg = args.first(handler, span)?; pats.push(Pattern::Enum(EnumPattern { enum_name: enum_pattern.enum_name.clone(), variant_name: enum_pattern.variant_name.clone(), value: Box::new(arg), })); } Pattern::from_pat_stack(handler, pats, span)? 
} Pattern::Tuple(elems) => { if elems.len() != args.len() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } let pats: PatStack = args .serialize_multi_patterns(handler, span)? .into_iter() .map(Pattern::Tuple) .collect::<Vec<_>>() .into(); Pattern::from_pat_stack(handler, pats, span)? } Pattern::Or(elems) => { if elems.len() != args.len() { return Err(handler.emit_err(CompileError::Internal( "malformed constructor request", span.clone(), ))); } let pats: PatStack = args .serialize_multi_patterns(handler, span)? .into_iter() .map(Pattern::Or) .collect::<Vec<_>>() .into(); Pattern::from_pat_stack(handler, pats, span)? } }; Ok(pat) } /// Create a `Pattern::Wildcard` pub(crate) fn wild_pattern() -> Self { Pattern::Wildcard } /// Finds the "a value" of the `Pattern`, AKA the number of sub-patterns /// used in the pattern's constructor. For example, the pattern /// `Pattern::Tuple([.., ..])` would have an "a value" of 2. pub(crate) fn a(&self) -> usize { match self { Pattern::U8(_) => 0, Pattern::U16(_) => 0, Pattern::U32(_) => 0, Pattern::U64(_) => 0, Pattern::B256(_) => 0, Pattern::Boolean(_) => 0, Pattern::Numeric(_) => 0, Pattern::String(_) => 0, Pattern::Struct(StructPattern { fields, .. }) => fields.len(), Pattern::Enum(_) => 1, Pattern::Tuple(elems) => elems.len(), Pattern::Wildcard => 0, Pattern::Or(elems) => elems.len(), } } /// Checks to see if two `Pattern` have the same constructor. For example, /// given the patterns: /// /// ```ignore /// A: Pattern::U64(Range { first: 0, last: 0 }) /// B: Pattern::U64(Range { first: 0, last: 0 }) /// C: Pattern::U64(Range { first: 1, last: 1 }) /// ``` /// /// A and B have the same constructor but A and C do not. 
/// /// Given the patterns: /// /// ```ignore /// A: Pattern::Tuple([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }), /// ]) /// B: Pattern::Tuple([ /// Pattern::U64(Range { first: 2, last: 2 }), /// Pattern::U64(Range { first: 3, last: 3 }), /// ]) /// C: Pattern::Tuple([ /// Pattern::U64(Range { first: 4, last: 4 }), /// ]) /// ``` /// /// A and B have the same constructor but A and C do not. pub(crate) fn has_the_same_constructor(&self, other: &Pattern) -> bool { match (self, other) { (Pattern::Wildcard, Pattern::Wildcard) => true, (Pattern::U8(a), Pattern::U8(b)) => a == b, (Pattern::U16(a), Pattern::U16(b)) => a == b, (Pattern::U32(a), Pattern::U32(b)) => a == b, (Pattern::U64(a), Pattern::U64(b)) => a == b, (Pattern::B256(a), Pattern::B256(b)) => a == b, (Pattern::Boolean(a), Pattern::Boolean(b)) => a == b, (Pattern::Numeric(a), Pattern::Numeric(b)) => a == b, (Pattern::String(a), Pattern::String(b)) => a == b, ( Pattern::Struct(StructPattern { struct_name: struct_name1, fields: fields1, }), Pattern::Struct(StructPattern { struct_name: struct_name2, fields: fields2, }), ) => struct_name1 == struct_name2 && fields1.len() == fields2.len(), ( Pattern::Enum(EnumPattern { enum_name: enum_name1, variant_name: variant_name1, .. }), Pattern::Enum(EnumPattern { enum_name: enum_name2, variant_name: variant_name2, .. }), ) => enum_name1 == enum_name2 && variant_name1 == variant_name2, (Pattern::Tuple(elems1), Pattern::Tuple(elems2)) => elems1.len() == elems2.len(), (Pattern::Or(_), Pattern::Or(_)) => unreachable!(), _ => false, } } /// Extracts the "sub-patterns" of a `Pattern`, aka the "arguments" to the /// patterns "constructor". Some patterns have 0 sub-patterns and some /// patterns have >0 sub-patterns. For example, this pattern: /// /// ```ignore /// Pattern::U64(Range { first: 0, last: 0 }), /// ``` /// /// has 0 sub-patterns. 
While this pattern: /// /// ```ignore /// Pattern::Tuple([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ]) /// ``` /// /// has 2 sub-patterns: /// /// ```ignore /// [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ] /// ``` pub(crate) fn sub_patterns( &self, handler: &Handler, span: &Span, ) -> Result<PatStack, ErrorEmitted> { let pats = match self { Pattern::Struct(StructPattern { fields, .. }) => fields .iter() .map(|(_, field)| field.to_owned()) .collect::<Vec<_>>() .into(), Pattern::Enum(EnumPattern { value, .. }) => PatStack::from_pattern((**value).clone()), Pattern::Tuple(elems) => elems.to_owned(), _ => PatStack::empty(), }; if self.a() != pats.len() { return Err(handler.emit_err(CompileError::Internal( "invariant self.a() == pats.len() broken", span.clone(), ))); } Ok(pats) } /// Performs a one-layer-deep flattening of a `Pattern` into a `PatStack`. /// If the pattern is an "or-pattern", return its contents, otherwise /// return the pattern as a `PatStack`. pub(crate) fn flatten(&self) -> PatStack { match self { Pattern::Or(pats) => pats.to_owned(), pat => PatStack::from_pattern(pat.to_owned()), } } /// Transforms this [Pattern] into a new [Pattern] that is a "root /// constructor" of the given pattern. A root constructor [Pattern] is /// defined as a pattern containing only wildcards as the subpatterns. 
pub(super) fn into_root_constructor(self) -> Pattern { match self { Pattern::Wildcard => Pattern::Wildcard, Pattern::U8(n) => Pattern::U8(n), Pattern::U16(n) => Pattern::U16(n), Pattern::U32(n) => Pattern::U32(n), Pattern::U64(n) => Pattern::U64(n), Pattern::B256(n) => Pattern::B256(n), Pattern::Boolean(b) => Pattern::Boolean(b), Pattern::Numeric(n) => Pattern::Numeric(n), Pattern::String(s) => Pattern::String(s), Pattern::Struct(pat) => Pattern::Struct(pat.into_root_constructor()), Pattern::Enum(pat) => Pattern::Enum(pat.into_root_constructor()), Pattern::Tuple(elems) => Pattern::Tuple(PatStack::fill_wildcards(elems.len())), Pattern::Or(elems) => { let mut pat_stack = PatStack::empty(); for elem in elems.into_iter() { pat_stack.push(elem.into_root_constructor()); } Pattern::Or(pat_stack) } } } pub(crate) fn matches_type_info(&self, type_info: &TypeInfo, decl_engine: &DeclEngine) -> bool { match (self, type_info) { ( Pattern::Enum(EnumPattern { enum_name: l_enum_name, variant_name, .. }), TypeInfo::Enum(r_enum_decl_ref), ) => { let r_decl = decl_engine.get_enum(r_enum_decl_ref); l_enum_name.as_str() == r_decl.call_path.suffix.as_str() && r_decl .variants .iter() .map(|variant_type| variant_type.name.clone()) .any(|name| name.as_str() == variant_name.as_str()) } _ => false, // NOTE: We may need to expand this in the future } } fn discriminant_value(&self) -> usize { match self { Pattern::Wildcard => 0, Pattern::U8(_) => 1, Pattern::U16(_) => 2, Pattern::U32(_) => 3, Pattern::U64(_) => 4, Pattern::B256(_) => 5, Pattern::Boolean(_) => 6, Pattern::Numeric(_) => 7, Pattern::String(_) => 8, Pattern::Struct(_) => 9, Pattern::Enum(_) => 10, Pattern::Tuple(_) => 11, Pattern::Or(_) => 12, } } } impl fmt::Display for Pattern { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let s = match self { Pattern::Wildcard => "_".to_string(), Pattern::U8(range) => format!("{range}"), Pattern::U16(range) => format!("{range}"), Pattern::U32(range) => format!("{range}"), 
Pattern::U64(range) => format!("{range}"), Pattern::Numeric(range) => format!("{range}"), Pattern::B256(n) => format!("{n:#?}"), Pattern::Boolean(b) => format!("{b}"), Pattern::String(s) => s.clone(), Pattern::Struct(struct_pattern) => format!("{struct_pattern}"), Pattern::Enum(enum_pattern) => format!("{enum_pattern}"), Pattern::Tuple(elems) => { let mut builder = String::new(); builder.push('('); write!(builder, "{elems}")?; builder.push(')'); builder } Pattern::Or(elems) => elems .iter() .map(|x| x.to_string()) .collect::<Vec<_>>() .join(" | "), }; write!(f, "{s}") } } impl std::cmp::Ord for Pattern { fn cmp(&self, other: &Self) -> Ordering { use Ordering::*; match (self, other) { (Pattern::Wildcard, Pattern::Wildcard) => Equal, (Pattern::U8(x), Pattern::U8(y)) => x.cmp(y), (Pattern::U16(x), Pattern::U16(y)) => x.cmp(y), (Pattern::U32(x), Pattern::U32(y)) => x.cmp(y), (Pattern::U64(x), Pattern::U64(y)) => x.cmp(y), (Pattern::B256(x), Pattern::B256(y)) => x.cmp(y), (Pattern::Boolean(x), Pattern::Boolean(y)) => x.cmp(y), (Pattern::Numeric(x), Pattern::Numeric(y)) => x.cmp(y), (Pattern::String(x), Pattern::String(y)) => x.cmp(y), (Pattern::Struct(x), Pattern::Struct(y)) => x.cmp(y), (Pattern::Enum(x), Pattern::Enum(y)) => x.cmp(y), (Pattern::Tuple(x), Pattern::Tuple(y)) => x.cmp(y), (Pattern::Or(x), Pattern::Or(y)) => x.cmp(y), (x, y) => x.discriminant_value().cmp(&y.discriminant_value()), } } } impl std::cmp::PartialOrd for Pattern { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct StructPattern { struct_name: String, fields: Vec<(String, Pattern)>, } impl StructPattern { pub(crate) fn new(struct_name: String, fields: Vec<(String, Pattern)>) -> Self { StructPattern { struct_name, fields, } } pub(crate) fn struct_name(&self) -> &String { &self.struct_name } pub(crate) fn fields(&self) -> &Vec<(String, Pattern)> { &self.fields } pub(super) fn into_root_constructor(self) -> 
StructPattern { let StructPattern { struct_name, fields, } = self; StructPattern { struct_name, fields: fields .into_iter() .map(|(name, _)| (name, Pattern::Wildcard)) .collect(), } } } impl fmt::Display for StructPattern { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut builder = String::new(); builder.push_str(self.struct_name.as_str()); builder.push_str(" { "); let mut start_of_wildcard_tail = None; for (i, (_, pat)) in self.fields.iter().enumerate().rev() { match (pat, start_of_wildcard_tail) { (Pattern::Wildcard, None) => {} (_, None) => start_of_wildcard_tail = Some(i + 1), (_, _) => {} } } let s: String = match start_of_wildcard_tail { Some(start_of_wildcard_tail) => { let (front, rest) = self.fields.split_at(start_of_wildcard_tail); let mut inner_builder = front .iter() .map(|(name, field)| -> Result<_, fmt::Error> { let mut inner_builder = String::new(); inner_builder.push_str(name); inner_builder.push_str(": "); write!(inner_builder, "{field}")?; Ok(inner_builder) }) .collect::<Result<Vec<_>, _>>()? .join(", "); if !rest.is_empty() { inner_builder.push_str(", ..."); } inner_builder } None => self .fields .iter() .map(|(name, field)| -> Result<_, fmt::Error> { let mut inner_builder = String::new(); inner_builder.push_str(name); inner_builder.push_str(": "); write!(inner_builder, "{field}")?; Ok(inner_builder) }) .collect::<Result<Vec<_>, _>>()? 
.join(", "), }; builder.push_str(&s); builder.push_str(" }"); write!(f, "{builder}") } } impl std::cmp::Ord for StructPattern { fn cmp(&self, other: &Self) -> Ordering { use Ordering::*; match self.struct_name.cmp(&other.struct_name) { Equal => self.fields.cmp(&other.fields), res => res, } } } impl std::cmp::PartialOrd for StructPattern { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct EnumPattern { pub(crate) enum_name: String, pub(crate) variant_name: String, pub(crate) value: Box<Pattern>, } impl EnumPattern { pub(super) fn into_root_constructor(self) -> EnumPattern { let EnumPattern { enum_name, variant_name, value: _, } = self; EnumPattern { enum_name, variant_name, value: Box::new(Pattern::Wildcard), } } } impl std::cmp::Ord for EnumPattern { fn cmp(&self, other: &Self) -> Ordering { use Ordering::*; match ( self.enum_name.cmp(&other.enum_name), self.variant_name.cmp(&other.variant_name), (*self.value).cmp(&*other.value), ) { // enum name is the first element to order by (Less, _, _) => Less, (Greater, _, _) => Greater, // variant name is the second element to order by (Equal, Less, _) => Less, (Equal, Greater, _) => Greater, // value is the last element to order by (Equal, Equal, Less) => Less, (Equal, Equal, Equal) => Equal, (Equal, Equal, Greater) => Greater, } } } impl std::cmp::PartialOrd for EnumPattern { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl fmt::Display for EnumPattern { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut builder = String::new(); builder.push_str(self.enum_name.as_str()); builder.push_str("::"); builder.push_str(self.variant_name.as_str()); builder.push('('); builder.push_str(&self.value.to_string()); builder.push(')'); write!(f, "{builder}") } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/match_pattern_variables.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/match_pattern_variables.rs
use indexmap::IndexMap; use sway_types::{Ident, Span, Spanned}; use crate::language::ty::{self, TyScrutinee}; /// First tuple field is `true` if the variable represented with [Span] is a struct field, otherwise `false`. pub(crate) type MatchVariable = (bool, Span); pub(crate) struct MatchVariableDuplicate { pub duplicate: MatchVariable, pub first_definition: MatchVariable, } /// Returns [MatchVariableDuplicate]s for all the duplicates found in the `scrutinee`, /// or empty [Vec] if there are no duplicate variables in the `scrutinee`. /// /// Alternatives are what make the algorithm more complex than just straightforward /// scan of the `scrutinee` for variables of the same name. /// In case of alternatives, we must have the same variables in all of the alternatives, /// and these are, of course, not duplicates. /// But we can still have duplicates within the alternatives, and between the alternatives /// and the other parts of the match arm. /// /// Consider the following examples: /// /// ```ignore /// Struct { x, y: x, z: x } => x.0, /// ``` /// The second and the third `x` are the duplicates of the first `x`. /// /// ```ignore /// Struct { x, .. } | Struct { x, .. } => x, /// (Struct { x, .. } | Struct { x, .. }, Struct { y, .. } | Struct { y, .. }) => if x { 0 } else { 1 } + y, /// ``` /// Here there are no duplicates. /// /// ```ignore /// (Struct { x, .. } | Struct { x, .. }, Struct { x, .. } | Struct { x, .. }) => if x { 0 } else { 1 } + y, /// ``` /// The second `x` is not a duplicate, but the third and fourth are duplicates of the first one. /// /// ```ignore /// Struct { x, y: x, z: x } | Struct { x, y: x, z: x } => x, /// ``` /// The second and the third `x` are duplicates of the first one, and the last two of the fourth one. /// /// ```ignore /// (x, Struct { x, .. } | Struct { x, .. }, y) => y, /// ``` /// The second and the third `x` are duplicates of the first one. /// /// ```ignore /// (0, Struct { x, y: x, .. } | Struct { x, .. 
}, x) => x, /// ``` /// The second and the last `x` are duplicates of the first one. The third one is not a duplicate. /// /// ```ignore /// (x, Struct { x, y: x, .. } | Struct { x, .. }, x) => x, /// ``` /// All `x`s are duplicates of the first one. /// /// Why not extend `matcher` to do this analysis? /// It would be a mixing of concerns and would complicate a clean implementation of the `matcher`. /// The `matcher`'s purpose is to detect variables and their types and properly bind them. /// Duplicates are seen as shadowing of variables which is perfectly fine from the `matcher` /// perspective. /// /// How the algorithm works? /// /// For a match arm represented by the `scrutinee` it creates a tree whose nodes are variable names. /// Variables are added by moving through the match arm left to right. /// Branching in the tree occurs in the case of alternatives. /// The algorithm traverses the branches depth-first and collects all the unique duplicates for every branch. /// Unique means that a duplicate can occur only in one branch. /// At the end it merges the result of all the branches in a single result. /// /// The algorithm assumes that the `matcher` already checked the match arm. /// This gives us the guarantee that every alternative contains the same variables and that for /// the parts of the match arm that follows alternatives, we need to consider only the left-most /// alternative as a potential holder of the already defined variables. /// /// For the examples given above, the corresponding trees look like this: /// /// ```ignore /// Struct { x, y: x, z: x } => x.0, /// - x - x - x /// /// Struct { x, .. } | Struct { x, .. } => x, /// / x /// - /// \ x <= this is the first, left-most x /// /// (Struct { x, .. } | Struct { x, .. }, Struct { y, .. } | Struct { y, .. }) => if x { 0 } else { 1 } + y, /// / x /// - / y /// \ x - /// \ y <= this is the left-most y /// /// (Struct { x, .. } | Struct { x, .. }, Struct { x, .. } | Struct { x, .. 
}) => if x { 0 } else { 1 } + y, /// / x /// - / x /// \ x - /// \ x /// /// Struct { x, y: x, z: x } | Struct { x, y: x, z: x } => x, /// / x - x - x /// - /// \ x - x - x /// /// (x, Struct { x, .. } | Struct { x, .. }, y) => y, /// / x /// -x- /// \ x - y /// /// (0, Struct { x, y: x, .. } | Struct { x, .. }, x) => x, /// / x /// - /// \ x - x - x /// /// (x, Struct { x, y: x, .. } | Struct { x, .. }, x) => x, /// / x /// -x- /// \ x - x - x /// /// ``` /// /// And here is a some general example with nested alternatives, several variables etc. /// /// ```ignore /// (x, y, x | x | x, Struct { x, y, z } | Struct { x: y | y | y, x, z }, z, x | x, z | z | z) /// /// / x /// / / y /// - x - y - x / - y /// \ / \ y - x - z /// \ x - /// \ / x /// \ x - y - z - z / z /// \ x - z /// \ z /// ``` pub(crate) fn collect_duplicate_match_pattern_variables( scrutinee: &TyScrutinee, ) -> Vec<MatchVariableDuplicate> { let mut left_most_branch = IndexMap::new(); let mut branches = vec![]; recursively_collect_duplicate_variables(&mut branches, &mut left_most_branch, scrutinee); branches.push(left_most_branch); let mut result = vec![]; for mut branch in branches { for (ident, (is_struct_field, duplicates)) in branch.iter_mut() { for duplicate in duplicates { result.push(MatchVariableDuplicate { duplicate: (duplicate.0, duplicate.1.clone()), first_definition: (*is_struct_field, ident.span()), }); } } } result.sort_by(|a, b| match a.duplicate.1.partial_cmp(&b.duplicate.1) { Some(ord) => ord, None => unreachable!(), }); return result; fn recursively_collect_duplicate_variables( branches: &mut Vec<IndexMap<Ident, (bool, Vec<MatchVariable>)>>, left_most_branch: &mut IndexMap<Ident, (bool, Vec<MatchVariable>)>, scrutinee: &TyScrutinee, ) { match &scrutinee.variant { ty::TyScrutineeVariant::CatchAll => (), ty::TyScrutineeVariant::Variable(ident) => add_variable(left_most_branch, ident, false), ty::TyScrutineeVariant::Literal(_) => (), ty::TyScrutineeVariant::Constant { .. 
} => (), ty::TyScrutineeVariant::StructScrutinee { fields, .. } => { // If a field does not have a scrutinee, the field itself is a variable. for field in fields { match &field.scrutinee { Some(scrutinee) => recursively_collect_duplicate_variables( branches, left_most_branch, scrutinee, ), None => add_variable(left_most_branch, &field.field, true), } } } ty::TyScrutineeVariant::Or(scrutinees) => { let (first, others) = scrutinees .split_first() .expect("There must be at least two alternatives in TyScrutineeVariant::Or."); // For all other alternatives then the first (left-most) one, span a new branch and pass it as a left-most. // The new branch contains the identifiers collected so far in the left-most branch, // but without duplicates collected so far. We want to have only unique duplicates in each branch. for scrutinee in others { let mut branch: IndexMap<Ident, (bool, Vec<(bool, Span)>)> = left_most_branch .iter() .map(|(ident, (is_struct_field, _))| { ( ident.clone(), (*is_struct_field, Vec::<(bool, Span)>::new()), ) }) .collect(); recursively_collect_duplicate_variables(branches, &mut branch, scrutinee); branches.push(branch); } // The variables in the left-most alternative go to the original left-most branch. recursively_collect_duplicate_variables(branches, left_most_branch, first); } ty::TyScrutineeVariant::Tuple(scrutinees) => { for scrutinee in scrutinees { match &scrutinee.variant { ty::TyScrutineeVariant::Variable(ident) => { add_variable(left_most_branch, ident, false) } _ => recursively_collect_duplicate_variables( branches, left_most_branch, scrutinee, ), }; } } ty::TyScrutineeVariant::EnumScrutinee { value, .. 
} => { recursively_collect_duplicate_variables(branches, left_most_branch, value) } } fn add_variable( duplicate_variables: &mut IndexMap<Ident, (bool, Vec<MatchVariable>)>, ident: &Ident, is_struct_field: bool, ) { duplicate_variables .entry(ident.clone()) .and_modify(|(_, vec)| vec.push((is_struct_field, ident.span()))) .or_insert((is_struct_field, vec![])); } } } /// Returns [Ident]s for all match arm variables found in the `scrutinee`, /// together with the information if the variable is a struct field (true) /// or not (false), or empty [Vec] if there are no variables declared in /// the `scrutinee`. /// /// If the `scrutinee` contains alternatives, and thus a variable is declared /// multiple times, each occurrence of the variable will be returned. pub(crate) fn collect_match_pattern_variables(scrutinee: &TyScrutinee) -> Vec<(Ident, bool)> { let mut variables = vec![]; recursively_collect_variables(&mut variables, scrutinee); return variables; fn recursively_collect_variables(variables: &mut Vec<(Ident, bool)>, scrutinee: &TyScrutinee) { match &scrutinee.variant { ty::TyScrutineeVariant::CatchAll => (), ty::TyScrutineeVariant::Variable(ident) => variables.push((ident.clone(), false)), ty::TyScrutineeVariant::Literal(_) => (), ty::TyScrutineeVariant::Constant { .. } => (), ty::TyScrutineeVariant::StructScrutinee { fields, .. } => { // If a field does not have a scrutinee, the field itself is a variable. for field in fields { match &field.scrutinee { Some(scrutinee) => recursively_collect_variables(variables, scrutinee), None => variables.push((field.field.clone(), true)), } } } ty::TyScrutineeVariant::Or(scrutinees) => { for scrutinee in scrutinees { recursively_collect_variables(variables, scrutinee); } } ty::TyScrutineeVariant::Tuple(scrutinees) => { for scrutinee in scrutinees { recursively_collect_variables(variables, scrutinee); } } ty::TyScrutineeVariant::EnumScrutinee { value, .. } => { recursively_collect_variables(variables, value) } } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/witness_report.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/witness_report.rs
use std::fmt; use itertools::Itertools; use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::Span; use crate::CompileError; use super::{patstack::PatStack, pattern::Pattern}; /// A `WitnessReport` is a report of the witnesses to a `Pattern` being useful /// and is used in the match expression exhaustivity checking algorithm. #[derive(Debug)] pub(crate) enum WitnessReport { NoWitnesses, Witnesses(PatStack), } impl WitnessReport { /// Joins two `WitnessReport`s together. pub(crate) fn join_witness_reports(a: WitnessReport, b: WitnessReport) -> Self { match (a, b) { (WitnessReport::NoWitnesses, WitnessReport::NoWitnesses) => WitnessReport::NoWitnesses, (WitnessReport::NoWitnesses, WitnessReport::Witnesses(wits)) => { WitnessReport::Witnesses(wits) } (WitnessReport::Witnesses(wits), WitnessReport::NoWitnesses) => { WitnessReport::Witnesses(wits) } (WitnessReport::Witnesses(wits1), WitnessReport::Witnesses(mut wits2)) => { let mut wits = wits1; wits.append(&mut wits2); WitnessReport::Witnesses(wits) } } } /// Given a `WitnessReport` *wr* and a constructor *c* with *a* number of /// sub-patterns, creates a new `Pattern` *p* and a new `WitnessReport` /// *wr'*. *p* is created by applying *c* to the first *a* elements of *wr*. /// *wr'* is created by taking the remaining elements of *wr* after *a* /// elements have been removed from the front of *wr*. 
pub(crate) fn split_into_leading_constructor( handler: &Handler, witness_report: WitnessReport, c: &Pattern, span: &Span, ) -> Result<(Pattern, Self), ErrorEmitted> { match witness_report { WitnessReport::NoWitnesses => Err(handler.emit_err(CompileError::Internal( "expected to find witnesses to use as arguments to a constructor", span.clone(), ))), WitnessReport::Witnesses(witnesses) => { let (rs, ps) = witnesses.split_at(handler, c.a(), span)?; let pat = Pattern::from_constructor_and_arguments(handler, c, rs, span)?; Ok((pat, WitnessReport::Witnesses(ps))) } } } /// Prepends a witness `Pattern` onto the `WitnessReport`. pub(crate) fn add_witness( &mut self, handler: &Handler, witness: Pattern, span: &Span, ) -> Result<(), ErrorEmitted> { match self { WitnessReport::NoWitnesses => Err(handler.emit_err(CompileError::Internal( "expected to find witnesses", span.clone(), ))), WitnessReport::Witnesses(witnesses) => { witnesses.prepend(witness); Ok(()) } } } /// Reports if this `WitnessReport` has witnesses. pub(crate) fn has_witnesses(&self) -> bool { match self { WitnessReport::NoWitnesses => false, WitnessReport::Witnesses(_) => true, // !witnesses.is_empty() } } } impl fmt::Display for WitnessReport { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let witnesses = match self { WitnessReport::NoWitnesses => PatStack::empty(), WitnessReport::Witnesses(witnesses) => witnesses.clone(), }; let s = witnesses .flatten() .into_iter() .map(|x| format!("`{x}`")) .join(", "); write!(f, "{s}") } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/patstack.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/patstack.rs
use std::{cmp::Ordering, fmt, slice::Iter, vec::IntoIter}; use itertools::Itertools; use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::Span; use crate::CompileError; use super::pattern::Pattern; /// A `PatStack` is a `Vec<Pattern>` that is implemented with special methods /// particular to the match exhaustivity algorithm. #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct PatStack { pats: Vec<Pattern>, } impl PatStack { /// Creates an empty `PatStack`. pub(crate) fn empty() -> Self { PatStack { pats: vec![] } } /// Given a `Pattern` *p*, creates a `PatStack` with one element *p*. pub(crate) fn from_pattern(p: Pattern) -> Self { PatStack { pats: vec![p] } } /// Given a usize *n*, creates a `PatStack` filled with *n* /// `Pattern::Wildcard` elements. pub(crate) fn fill_wildcards(n: usize) -> Self { let mut pats = vec![]; for _ in 0..n { pats.push(Pattern::Wildcard); } PatStack { pats } } /// Returns the first element of a `PatStack`. pub(crate) fn first(&self, handler: &Handler, span: &Span) -> Result<Pattern, ErrorEmitted> { match self.pats.first() { Some(first) => Ok(first.to_owned()), None => Err(handler.emit_err(CompileError::Internal("empty PatStack", span.clone()))), } } /// Returns a tuple of the first element of a `PatStack` and the rest of the /// elements. pub(crate) fn split_first( &self, handler: &Handler, span: &Span, ) -> Result<(Pattern, PatStack), ErrorEmitted> { match self.pats.split_first() { Some((first, pat_stack_contents)) => { let pat_stack = PatStack { pats: pat_stack_contents.to_vec(), }; Ok((first.to_owned(), pat_stack)) } None => Err(handler.emit_err(CompileError::Internal("empty PatStack", span.clone()))), } } /// Given a usize *n*, splits the `PatStack` at *n* and returns both halves. 
pub(crate) fn split_at( &self, handler: &Handler, n: usize, span: &Span, ) -> Result<(PatStack, PatStack), ErrorEmitted> { if n > self.len() { return Err(handler.emit_err(CompileError::Internal( "attempting to split OOB", span.clone(), ))); } let (a, b) = self.pats.split_at(n); let x = PatStack { pats: a.to_vec() }; let y = PatStack { pats: b.to_vec() }; Ok((x, y)) } /// Pushes a `Pattern` onto the `PatStack` pub(crate) fn push(&mut self, other: Pattern) { self.pats.push(other) } /// Given a usize *n*, returns a mutable reference to the `PatStack` at /// index *n*. fn get_mut( &mut self, handler: &Handler, n: usize, span: &Span, ) -> Result<&mut Pattern, ErrorEmitted> { match self.pats.get_mut(n) { Some(elem) => Ok(elem), None => Err(handler.emit_err(CompileError::Internal( "can't retrieve mutable reference to element", span.clone(), ))), } } /// Appends a `PatStack` onto the `PatStack`. pub(crate) fn append(&mut self, others: &mut PatStack) { self.pats.append(&mut others.pats); } /// Prepends a `Pattern` onto the `PatStack`. pub(crate) fn prepend(&mut self, other: Pattern) { self.pats.insert(0, other); } /// Returns the length of the `PatStack`. pub(crate) fn len(&self) -> usize { self.pats.len() } /// Reports if the `PatStack` is empty. pub(crate) fn is_empty(&self) -> bool { self.flatten().filter_out_wildcards().pats.is_empty() } /// Reports if the `PatStack` contains a given `Pattern`. pub(crate) fn contains(&self, pat: &Pattern) -> bool { self.pats.contains(pat) } /// Reports if the `PatStack` contains an or-pattern at the top level. fn contains_or_pattern(&self) -> bool { for pat in self.pats.iter() { if let Pattern::Or(_) = pat { return true; } } false } pub(crate) fn iter(&self) -> Iter<'_, Pattern> { self.pats.iter() } /// Flattens the contents of a `PatStack` into a `PatStack`. 
pub(crate) fn flatten(&self) -> PatStack { let mut flattened = PatStack::empty(); for pat in self.pats.iter() { flattened.append(&mut pat.flatten()); } flattened } /// Orders a `PatStack` into a human-readable order. pub(crate) fn sort(self) -> PatStack { let mut sorted = self.pats; sorted.sort(); PatStack::from(sorted) } /// Returns the given `PatStack` with wildcard patterns filtered out. pub(crate) fn filter_out_wildcards(&self) -> PatStack { let mut pats = PatStack::empty(); for pat in self.pats.iter() { match pat { Pattern::Wildcard => {} pat => pats.push(pat.to_owned()), } } pats } /// Given a `PatStack` *args*, return a `Vec<PatStack>` *args*' /// "serialized" from *args*. /// /// Or-patterns are extracted to create a vec of `PatStack`s *args*' where /// each `PatStack` is a copy of *args* where the index of the or-pattern is /// instead replaced with one element from the or-patterns contents. More /// specifically, given an *args* with one or-pattern that contains n /// elements, this "serialization" would result in *args*' of length n. /// Given an *args* with two or-patterns that contain n elements and m /// elements, this would result in *args*' of length n*m. 
/// /// For example, given an *args*: /// /// ```ignore /// [ /// Pattern::Or([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ]), /// Pattern::Wildcard /// ] /// ``` /// /// *args* would serialize to: /// /// ```ignore /// [ /// [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::Wildcard /// ], /// [ /// Pattern::U64(Range { first: 1, last: 1 }), /// Pattern::Wildcard /// ] /// ] /// ``` /// /// Or, given an *args*: /// /// ```ignore /// [ /// Pattern::Or([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 1, last: 1 }) /// ]), /// Pattern::Or([ /// Pattern::U64(Range { first: 2, last: 2 }), /// Pattern::U64(Range { first: 3, last: 3 }), /// Pattern::U64(Range { first: 4, last: 4 }), /// ]), /// ] /// ``` /// /// *args* would serialize to: /// /// ```ignore /// [ /// [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 2, last: 2 }) /// ], /// [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 3, last: 3 }) /// ], /// [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 4, last: 4 }) /// ], /// [ /// Pattern::U64(Range { first: 1, last: 1 }), /// Pattern::U64(Range { first: 2, last: 2 }) /// ], /// [ /// Pattern::U64(Range { first: 1, last: 1 }), /// Pattern::U64(Range { first: 3, last: 3 }) /// ], /// [ /// Pattern::U64(Range { first: 1, last: 1 }), /// Pattern::U64(Range { first: 4, last: 4 }) /// ], /// ] /// ``` pub(crate) fn serialize_multi_patterns( self, handler: &Handler, span: &Span, ) -> Result<Vec<PatStack>, ErrorEmitted> { let mut output: Vec<PatStack> = vec![]; let mut stack: Vec<PatStack> = vec![self]; while !stack.is_empty() { let top = match stack.pop() { Some(top) => top, None => { return Err( handler.emit_err(CompileError::Internal("can't pop Vec", span.clone())) ); } }; if !top.contains_or_pattern() { output.push(top); } else { for (i, pat) in 
top.clone().into_iter().enumerate() { if let Pattern::Or(elems) = pat { for elem in elems.into_iter() { let mut top = top.clone(); let r = top.get_mut(handler, i, span)?; let _ = std::mem::replace(r, elem); stack.push(top); } } } } } output.reverse(); Ok(output) } /// Orders a `PatStack` into a human-readable order. /// /// For error reporting only. pub(crate) fn remove_duplicates(self) -> PatStack { let mut new_pats = vec![]; for pat in self.pats.into_iter() { if !new_pats.contains(&pat) { new_pats.push(pat); } } PatStack::from(new_pats) } } impl IntoIterator for PatStack { type Item = Pattern; type IntoIter = IntoIter<Pattern>; fn into_iter(self) -> Self::IntoIter { self.pats.into_iter() } } impl From<Vec<Pattern>> for PatStack { fn from(pats: Vec<Pattern>) -> Self { PatStack { pats } } } impl fmt::Display for PatStack { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let s = self .flatten() .sort() .remove_duplicates() .into_iter() .map(|x| format!("{x}")) .join(", "); write!(f, "{s}") } } impl std::cmp::Ord for PatStack { fn cmp(&self, other: &Self) -> Ordering { let sorted_self = self.clone().sort(); let sorted_other = other.clone().sort(); sorted_self.pats.cmp(&sorted_other.pats) } } impl std::cmp::PartialOrd for PatStack { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/constructor_factory.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/constructor_factory.rs
use std::collections::HashSet; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::{Ident, Span}; use crate::{decl_engine::DeclEngine, language::ty, type_system::TypeId, Engines, TypeInfo}; use super::{ patstack::PatStack, pattern::{EnumPattern, Pattern, StructPattern}, range::Range, }; pub(crate) struct ConstructorFactory { possible_types: Vec<TypeInfo>, } impl ConstructorFactory { pub(crate) fn new(engines: &Engines, type_id: TypeId) -> Self { let possible_types = type_id.extract_nested_types(engines); ConstructorFactory { possible_types } } /// Given Σ, computes a `Pattern` not present in Σ from the type of the /// elements of Σ. If more than one `Pattern` is found, these patterns are /// wrapped in an or-pattern. /// /// For example, given this Σ: /// /// ```ignore /// [ /// Pattern::U64(Range { first: std::u64::MIN, last: 3 }), /// Pattern::U64(Range { first: 16, last: std::u64::MAX }) /// ] /// ``` /// /// this would result in this `Pattern`: /// /// ```ignore /// Pattern::U64(Range { first: 4, last: 15 }) /// ``` /// /// Given this Σ (which is more likely to occur than the above example): /// /// ```ignore /// [ /// Pattern::U64(Range { first: 2, last: 3 }), /// Pattern::U64(Range { first: 16, last: 17 }) /// ] /// ``` /// /// this would result in this `Pattern`: /// /// ```ignore /// Pattern::Or([ /// Pattern::U64(Range { first: std::u64::MIN, last: 1 }), /// Pattern::U64(Range { first: 4, last: 15 }), /// Pattern::U64(Range { first: 18, last: std::u64::MAX }) /// ]) /// ``` pub(crate) fn create_pattern_not_present( &self, handler: &Handler, engines: &Engines, sigma: PatStack, span: &Span, ) -> Result<Pattern, ErrorEmitted> { let (first, rest) = sigma .flatten() .filter_out_wildcards() .split_first(handler, span)?; let pat = match first { Pattern::U8(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::U8(range) => ranges.push(range), _ => { return 
Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } let unincluded: PatStack = Range::find_exclusionary_ranges(handler, ranges, Range::u8(), span)? .into_iter() .map(Pattern::U8) .collect::<Vec<_>>() .into(); Pattern::from_pat_stack(handler, unincluded, span)? } Pattern::U16(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::U16(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } let unincluded: PatStack = Range::find_exclusionary_ranges(handler, ranges, Range::u16(), span)? .into_iter() .map(Pattern::U16) .collect::<Vec<_>>() .into(); Pattern::from_pat_stack(handler, unincluded, span)? } Pattern::U32(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::U32(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } let unincluded: PatStack = Range::find_exclusionary_ranges(handler, ranges, Range::u32(), span)? .into_iter() .map(Pattern::U32) .collect::<Vec<_>>() .into(); Pattern::from_pat_stack(handler, unincluded, span)? } Pattern::U64(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::U64(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } let unincluded: PatStack = Range::find_exclusionary_ranges(handler, ranges, Range::u64(), span)? .into_iter() .map(Pattern::U64) .collect::<Vec<_>>() .into(); Pattern::from_pat_stack(handler, unincluded, span)? 
} Pattern::Numeric(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::Numeric(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } let unincluded: PatStack = Range::find_exclusionary_ranges(handler, ranges, Range::u64(), span)? .into_iter() .map(Pattern::Numeric) .collect::<Vec<_>>() .into(); Pattern::from_pat_stack(handler, unincluded, span)? } // we will not present every string case Pattern::String(_) => Pattern::Wildcard, Pattern::Wildcard => Pattern::Wildcard, // we will not present every b256 case Pattern::B256(_) => Pattern::Wildcard, Pattern::Boolean(b) => { let mut true_found = false; let mut false_found = false; if b { true_found = true; } else { false_found = true; } if rest.contains(&Pattern::Boolean(true)) { true_found = true; } else if rest.contains(&Pattern::Boolean(false)) { false_found = true; } if true_found && false_found { return Err(handler.emit_err(CompileError::Internal( "unable to create a new pattern", span.clone(), ))); } else if true_found { Pattern::Boolean(false) } else { Pattern::Boolean(true) } } Pattern::Struct(struct_pattern) => { let fields = struct_pattern .fields() .iter() .map(|(name, _)| (name.clone(), Pattern::Wildcard)) .collect::<Vec<_>>(); Pattern::Struct(StructPattern::new( struct_pattern.struct_name().clone(), fields, )) } ref pat @ Pattern::Enum(ref enum_pattern) => { let type_info = self.resolve_possible_types(handler, pat, span, engines.de())?; let enum_decl = engines .de() .get_enum(&type_info.expect_enum(handler, engines, "", span)?); let enum_name = &enum_decl.call_path.suffix; let enum_variants = &enum_decl.variants; let (all_variants, variant_tracker) = ConstructorFactory::resolve_enum( handler, enum_name, enum_variants, enum_pattern, rest, span, )?; Pattern::from_pat_stack( handler, PatStack::from( all_variants .difference(&variant_tracker) .map(|x| { 
Pattern::Enum(EnumPattern { enum_name: enum_name.to_string(), variant_name: x.clone(), value: Box::new(Pattern::Wildcard), }) }) .collect::<Vec<_>>(), ), span, )? } Pattern::Tuple(elems) => Pattern::Tuple(PatStack::fill_wildcards(elems.len())), Pattern::Or(elems) => { let mut pat_stack = PatStack::empty(); for pat in elems.into_iter() { pat_stack.push(self.create_pattern_not_present( handler, engines, PatStack::from_pattern(pat), span, )?); } Pattern::from_pat_stack(handler, pat_stack, span)? } }; Ok(pat) } /// Reports if the `PatStack` Σ is a "complete signature" of the type of the /// elements of Σ. /// /// For example, a Σ composed of `Pattern::U64(..)`s would need to check for /// if it is a complete signature for the `U64` pattern type. Versus a Σ /// composed of `Pattern::Tuple([.., ..])` which would need to check for if /// it is a complete signature for "`Tuple` with 2 sub-patterns" type. /// /// There are several rules with which to determine if Σ is a complete /// signature: /// /// 1. If Σ is empty it is not a complete signature. /// 2. If Σ contains only wildcard patterns, it is not a complete signature. /// 3. If Σ contains all constructors for the type of the elements of Σ then /// it is a complete signature. /// /// For example, given this Σ: /// /// ```ignore /// [ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::U64(Range { first: 7, last: 7 }) /// ] /// ``` /// /// this would not be a complete signature as it does not contain all /// elements from the `U64` type. /// /// Given this Σ: /// /// ```ignore /// [ /// Pattern::U64(Range { first: std::u64::MIN, last: std::u64::MAX }) /// ] /// ``` /// /// this would be a complete signature as it does contain all elements from /// the `U64` type. 
/// /// Given this Σ: /// /// ```ignore /// [ /// Pattern::Tuple([ /// Pattern::U64(Range { first: 0, last: 0 }), /// Pattern::Wildcard /// ]), /// ] /// ``` /// /// this would also be a complete signature as it does contain all elements /// from the "`Tuple` with 2 sub-patterns" type. pub(crate) fn is_complete_signature( &self, handler: &Handler, engines: &Engines, pat_stack: &PatStack, span: &Span, ) -> Result<bool, ErrorEmitted> { // flatten or patterns let pat_stack = pat_stack .clone() .serialize_multi_patterns(handler, span)? .into_iter() .fold(PatStack::empty(), |mut acc, mut pats| { acc.append(&mut pats); acc }); if pat_stack.is_empty() { return Ok(false); } if pat_stack.contains(&Pattern::Wildcard) { return Ok(true); } let (first, mut rest) = pat_stack.split_first(handler, span)?; match first { // its assumed that no one is ever going to list every string Pattern::String(_) => Ok(false), // its assumed that no one is ever going to list every B256 Pattern::B256(_) => Ok(false), Pattern::U8(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::U8(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } Range::do_ranges_equal_range(handler, ranges, Range::u8(), span) } Pattern::U16(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::U16(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } Range::do_ranges_equal_range(handler, ranges, Range::u16(), span) } Pattern::U32(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::U32(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } Range::do_ranges_equal_range(handler, ranges, 
Range::u32(), span) } Pattern::U64(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::U64(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } Range::do_ranges_equal_range(handler, ranges, Range::u64(), span) } Pattern::Numeric(range) => { let mut ranges = vec![range]; for pat in rest.into_iter() { match pat { Pattern::Numeric(range) => ranges.push(range), _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } Range::do_ranges_equal_range(handler, ranges, Range::u64(), span) } Pattern::Boolean(b) => { let mut true_found = false; let mut false_found = false; match b { true => true_found = true, false => false_found = true, } for pat in rest.iter() { match pat { Pattern::Boolean(b) => match b { true => true_found = true, false => false_found = true, }, _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } Ok(true_found && false_found) } ref pat @ Pattern::Enum(ref enum_pattern) => { let type_info = self.resolve_possible_types(handler, pat, span, engines.de())?; let enum_decl = engines .de() .get_enum(&type_info.expect_enum(handler, engines, "", span)?); let enum_name = &enum_decl.call_path.suffix; let enum_variants = &enum_decl.variants; let (all_variants, variant_tracker) = ConstructorFactory::resolve_enum( handler, enum_name, enum_variants, enum_pattern, rest, span, )?; Ok(all_variants.difference(&variant_tracker).next().is_none()) } ref tup @ Pattern::Tuple(_) => { for pat in rest.iter() { if !pat.has_the_same_constructor(tup) { return Ok(false); } } Ok(true) } ref strct @ Pattern::Struct(_) => { for pat in rest.iter() { if !pat.has_the_same_constructor(strct) { return Ok(false); } } Ok(true) } Pattern::Wildcard => Err(handler.emit_err(CompileError::Internal( 
"expected the wildcard pattern to be filtered out here", span.clone(), ))), Pattern::Or(mut elems) => { elems.append(&mut rest); Ok(self.is_complete_signature(handler, engines, &elems, span)?) } } } fn resolve_possible_types( &self, handler: &Handler, pattern: &Pattern, span: &Span, decl_engine: &DeclEngine, ) -> Result<&TypeInfo, ErrorEmitted> { let mut type_info = None; for possible_type in self.possible_types.iter() { let matches = pattern.matches_type_info(possible_type, decl_engine); if matches { type_info = Some(possible_type); break; } } match type_info { Some(type_info) => Ok(type_info), None => Err(handler.emit_err(CompileError::Internal( "there is no type that matches this pattern", span.clone(), ))), } } fn resolve_enum( handler: &Handler, enum_name: &Ident, enum_variants: &[ty::TyEnumVariant], enum_pattern: &EnumPattern, rest: PatStack, span: &Span, ) -> Result<(HashSet<String>, HashSet<String>), ErrorEmitted> { if enum_pattern.enum_name.as_str() != enum_name.as_str() { return Err(handler.emit_err(CompileError::Internal( "expected matching enum names", span.clone(), ))); } let mut all_variants: HashSet<String> = HashSet::new(); for variant in enum_variants.iter() { all_variants.insert(variant.name.to_string().clone()); } let mut variant_tracker: HashSet<String> = HashSet::new(); variant_tracker.insert(enum_pattern.variant_name.clone()); for pat in rest.iter() { match pat { Pattern::Enum(enum_pattern2) => { if enum_pattern2.enum_name.as_str() != enum_name.as_str() { return Err(handler.emit_err(CompileError::Internal( "expected matching enum names", span.clone(), ))); } variant_tracker.insert(enum_pattern2.variant_name.to_string()); } _ => { return Err(handler.emit_err(CompileError::Internal( "expected all patterns to be of the same type", span.clone(), ))); } } } Ok((all_variants, variant_tracker)) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/mod.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/mod.rs
mod constructor_factory; mod match_pattern_variables; mod matrix; mod patstack; mod pattern; mod range; mod reachable_report; mod usefulness; mod witness_report; pub(crate) use match_pattern_variables::{ collect_duplicate_match_pattern_variables, collect_match_pattern_variables, }; pub(in crate::semantic_analysis::ast_node::expression) use reachable_report::ReachableReport; pub(crate) use usefulness::check_match_expression_usefulness;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/usefulness.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/usefulness.rs
use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::Span; use crate::{language::ty, type_system::TypeId, Engines}; use super::{ constructor_factory::ConstructorFactory, matrix::Matrix, patstack::PatStack, pattern::Pattern, reachable_report::ReachableReport, witness_report::WitnessReport, }; /// Given the arms of a match expression, checks to see if the arms are /// exhaustive and checks to see if each arm is reachable. /// /// --- /// /// Modeled after this paper: /// http://moscova.inria.fr/%7Emaranget/papers/warn/warn004.html /// /// Implemented in Rust here: /// https://doc.rust-lang.org/1.75.0/nightly-rustc/rustc_mir_build/thir/pattern/usefulness/index.html /// /// --- /// /// In general, match expressions are constructed as so: /// /// ```ignore /// match value { /// pattern => result, /// pattern => result, /// pattern => result /// } /// ``` /// /// where `value` is the "matched value", and each `pattern => result` is a /// "match arm", and `value` will "match" one of the `patterns` in the match /// arms. A match happens when a `pattern` has the same "type" and "shape" as /// the `value`, at some level of generality. For example `1` will match `1`, /// `a`, and `_`, but will not match `2`. /// /// The goal of this algorithm is to: /// 1. Check to see if the arms are exhaustive (i.e. all cases for which the /// matched value could be are included in the provided arms) /// 2. Check to see if each arm is reachable (i.e. if each arm is able to /// "catch" at least on hypothetical matched value without the previous arms /// "catching" all the values) /// /// # `Pattern` /// /// A `Pattern` is an object that is able to be matched upon. A `Pattern` is /// semantically constructed of a "constructor" and its "arguments". For /// example, given the tuple `(1,2)` "a tuple with 2 elements" is the /// constructor and "1, 2" are the arguments. 
Given the u64 `2`, "2" is the /// constructor and it has no arguments (you can think of this by imagining /// that u64 is the enum type and each u64 value is a variant of that enum type, /// making the value itself a constructor). /// /// `Pattern`s are semantically categorized into three categories: wildcard /// patterns (the catchall pattern `_` and variable binding patterns like `a`), /// constructed patterns (`(1,2)` aka "a tuple with 2 elements" with arguments /// "1, 2"), and or-patterns (`1 | 2 | .. `). /// /// `Pattern`s are used in the exhaustivity algorithm. /// /// # Usefulness /// /// A pattern is "useful" when it covers at least one case of a possible /// matched value that had been left uncovered by previous patterns. /// /// For example, given: /// /// ```ignore /// let x = true; /// match x { /// true => .., /// false => .. /// } /// ``` /// /// the pattern `false` is useful because it covers at least one case (i.e. /// `false`) that had been left uncovered by the previous patterns. /// /// Given: /// /// ```ignore /// let x = 5; /// match x { /// 0 => .., /// 1 => .., /// _ => .. /// } /// ``` /// /// the pattern `_` is useful because it covers at least one case (i.e. all /// cases other than 0 and 1) that had been left uncovered by the previous /// patterns. /// /// In another example, given: /// /// ```ignore /// let x = 5; /// match x { /// 0 => .., /// 1 => .., /// 1 => .., // <-- /// _ => .. /// } /// ``` /// /// the pattern `1` (noted with an arrow) is not useful as it does not cover any /// case that is not already covered by a previous pattern. /// /// Given: /// /// ```ignore /// let x = 5; /// match x { /// 0 => .., /// 1 => .., /// _ => .., /// 2 => .. // <-- /// } /// ``` /// /// the pattern `2` is not useful as it does not cover any case that is not /// already covered by a previous pattern. Even though there is only one pattern /// `2`, any cases that the pattern `2` covers would previously be caught by the /// catchall pattern. 
/// /// Usefulness is used in the exhaustivity algorithm. /// /// # Witnesses /// /// A "witness" to a pattern is a concrete example of a matched value that would /// be caught by that pattern that would not have been caught by previous /// patterns. /// /// For example, given: /// /// ```ignore /// let x = 5; /// match x { /// 0 => .., /// 1 => .., /// _ => .. /// } /// ``` /// /// the witness for pattern `1` would be the value "1" as the pattern `1` /// would catch the concrete hypothetical matched value "1" and no other /// previous cases would have caught it. The witness for pattern `_` is an /// or-pattern of all of the remaining integers they wouldn't be caught by `0` /// and `1`, so "2 | .. | MAX". /// /// Given: /// /// ```ignore /// let x = 5; /// match x { /// 0 => .., /// 1 => .., /// 1 => .., // <-- /// _ => .. /// } /// ``` /// /// the pattern `1` (noted with an arrow) would not have any witnesses /// that it catches that are not caught by previous patterns. /// /// # Putting it all together /// /// Given the definitions above, we can say several things: /// /// 1. A pattern is useful when it has witnesses to its usefulness (i.e. it has /// at least one hypothetical value that it catches that is not caught by /// previous patterns). /// 2. A match arm is reachable when its pattern is useful. /// 3. A match expression is exhaustive when, if you add an additional wildcard /// pattern to the existing patterns, this new wildcard pattern is not /// useful. /// /// # Details /// /// This algorithm checks if a match expression is exhaustive and if its match /// arms are reachable by applying the above definitions of usefulness and /// witnesses. This algorithm sequentially creates a [WitnessReport] for every /// match arm by calling *U(P, q)*, where *P* is the [Matrix] of patterns seen /// so far and *q* is the current pattern under investigation for its /// reachability. A match arm is reachable if its `WitnessReport` is non-empty. 
/// Once all existing match arms have been analyzed, the match expression is /// analyzed for its exhaustivity. *U(P, q)* is called again to create another /// `WitnessReport`, this time where *P* is the `Matrix` of all patterns and `q` /// is an imaginary additional wildcard pattern. The match expression is /// exhaustive if the imaginary additional wildcard pattern has an empty /// `WitnessReport`. pub(crate) fn check_match_expression_usefulness( handler: &Handler, engines: &Engines, type_id: TypeId, scrutinees: Vec<ty::TyScrutinee>, span: Span, ) -> Result<(WitnessReport, Vec<ReachableReport>), ErrorEmitted> { let mut matrix = Matrix::empty(); let mut arms_reachability = vec![]; // If the provided type does not have a valid constructor and there are no // branches in the match expression (i.e. no scrutinees to check), then // every scrutinee (i.e. 0 scrutinees) are useful! We return early in this // case. if !engines .te() .get(type_id) .has_valid_constructor(engines.de()) && scrutinees.is_empty() { let witness_report = WitnessReport::NoWitnesses; let arms_reachability = vec![]; return Ok((witness_report, arms_reachability)); } let factory = ConstructorFactory::new(engines, type_id); for scrutinee in scrutinees.into_iter() { let pat = Pattern::from_scrutinee(scrutinee.clone()); let v = PatStack::from_pattern(pat); let witness_report = is_useful(handler, engines, &factory, &matrix, &v, &span)?; matrix.push(v); // if an arm has witnesses to its usefulness then it is reachable arms_reachability.push(ReachableReport::new( witness_report.has_witnesses(), scrutinee, )); } let v = PatStack::from_pattern(Pattern::wild_pattern()); let witness_report = is_useful(handler, engines, &factory, &matrix, &v, &span)?; // if a wildcard case has no witnesses to its usefulness, then the match arms are exhaustive Ok((witness_report, arms_reachability)) } /// Given a `Matrix` *P* and a `PatStack` *q*, computes a `WitnessReport` from /// algorithm *U(P, q)*. 
/// /// This recursive algorithm is basically an induction proof with 2 base cases. /// The first base case is when *P* is the empty `Matrix`. In this case, we /// return a witness report where the witnesses are wildcard patterns for every /// element of *q*. The second base case is when *P* has at least one row but /// does not have any columns. In this case, we return a witness report with no /// witnesses. This case indicates exhaustivity. The induction case covers /// everything else, and what we do for induction depends on what the first /// element of *q* is. Depending on if the first element of *q* is a wildcard /// pattern, or-pattern, or constructed pattern we do something different. Each /// case returns a witness report that we propagate through the recursive steps. fn is_useful( handler: &Handler, engines: &Engines, factory: &ConstructorFactory, p: &Matrix, q: &PatStack, span: &Span, ) -> Result<WitnessReport, ErrorEmitted> { let (m, n) = p.m_n(handler, span)?; match (m, n) { (0, 0) => Ok(WitnessReport::Witnesses(PatStack::fill_wildcards(q.len()))), (_, 0) => Ok(WitnessReport::NoWitnesses), (_, _) => { let c = q.first(handler, span)?; let witness_report = match c { Pattern::Wildcard => is_useful_wildcard(handler, engines, factory, p, q, span)?, Pattern::Or(pats) => is_useful_or(handler, engines, factory, p, q, pats, span)?, c => is_useful_constructed(handler, engines, factory, p, q, c, span)?, }; Ok(witness_report) } } } /// Computes a witness report from *U(P, q)* when *q* is a wildcard pattern. /// /// Because *q* is a wildcard pattern, this means we are checking to see if the /// wildcard pattern is useful given *P*. We can do this by investigating the /// first column Σ of *P*. If Σ is a complete signature (that is if Σ contains /// every constructor for the type of elements in Σ), then we can recursively /// compute the witnesses for every element of Σ and aggregate them. 
If Σ is not /// a complete signature, then we can compute the default `Matrix` for *P* (i.e. /// a version of *P* that is agnostic to *c*) and recursively compute the /// witnesses for if q is useful given the new default `Matrix`. /// /// --- /// /// 1. Compute Σ = {c₁, ... , cₙ}, which is the set of constructors that appear /// as root constructors of the patterns of *P*'s first column. /// 2. Determine if Σ is a complete signature. /// 3. If it is a complete signature: /// 1. For every every *k* 0..*n*, compute the specialized `Matrix` /// *S(cₖ, P)* /// 2. Compute the specialized `Matrix` *S(cₖ, q)* /// 3. Recursively compute U(S(cₖ, P), S(cₖ, q)) /// 4. If the recursive call to (3.3) returns a non-empty witness report, /// create a new pattern from *cₖ* and the witness report and a create a /// new witness report from the elements not used to create the new /// pattern /// 5. Aggregate a new patterns and new witness reports from every call of /// (3.4) /// 6. Transform the aggregated patterns from (3.5) into a single pattern /// and prepend it to the aggregated witness report /// 7. Return the witness report /// 4. If it is not a complete signature: /// 1. Compute the default `Matrix` *D(P)* /// 2. Compute *q'* as \[q₂ ... qₙ*\]. /// 3. Recursively compute *U(D(P), q')*. /// 4. If Σ is empty, create a pattern not present in Σ /// 5. Add this new pattern to the resulting witness report /// 6. Return the witness report fn is_useful_wildcard( handler: &Handler, engines: &Engines, factory: &ConstructorFactory, p: &Matrix, q: &PatStack, span: &Span, ) -> Result<WitnessReport, ErrorEmitted> { // 1. Compute Σ = {c₁, ... , cₙ}, which is the set of constructors that appear // as root constructors of the patterns of *P*'s first column. let sigma = p.compute_sigma(handler, span)?; // 2. Determine if Σ is a complete signature. let is_complete_signature = factory.is_complete_signature(handler, engines, &sigma, span)?; if is_complete_signature { // 3. 
If it is a complete signature: let mut witness_report = WitnessReport::NoWitnesses; let mut pat_stack = PatStack::empty(); for c_k in sigma.iter() { // 3.1. For every every *k* 0..*n*, compute the specialized `Matrix` // *S(cₖ, P)* let s_c_k_p = compute_specialized_matrix(handler, c_k, p, q, span)?; // 3.2. Compute the specialized `Matrix` *S(cₖ, q)* let s_c_k_q = compute_specialized_matrix( handler, c_k, &Matrix::from_pat_stack(q.clone()), q, span, )?; // *S(cₖ, q)* may have multiple rows in the case of a or pattern // in that case we define: *U(P,((r1∣r2) q2...qn)) = U(P,(r1 q2...qn)) ∨ U(P,(r2 q2...qn))* let mut wr = WitnessReport::NoWitnesses; for s_c_k_q in s_c_k_q.rows() { // 3.3. Recursively compute U(S(cₖ, P), S(cₖ, q)) let new_wr = is_useful(handler, engines, factory, &s_c_k_p, s_c_k_q, span)?; wr = WitnessReport::join_witness_reports(wr, new_wr); } // 3.4. If the recursive call to (3.3) returns a non-empty witness report, // create a new pattern from *cₖ* and the witness report and a create a // new witness report from the elements not used to create the new // pattern // 3.5. Aggregate the new patterns and new witness reports from every call of // (3.4) match (&witness_report, wr) { (WitnessReport::NoWitnesses, WitnessReport::NoWitnesses) => {} (WitnessReport::Witnesses(_), WitnessReport::NoWitnesses) => {} (WitnessReport::NoWitnesses, wr @ WitnessReport::Witnesses(_)) => { let (pat, wr) = WitnessReport::split_into_leading_constructor(handler, wr, c_k, span)?; if !pat_stack.contains(&pat) { pat_stack.push(pat); } witness_report = wr; } (_, wr) => { let (pat, wr) = WitnessReport::split_into_leading_constructor(handler, wr, c_k, span)?; if !pat_stack.contains(&pat) { pat_stack.push(pat); } witness_report = WitnessReport::join_witness_reports(witness_report, wr); } } } // 3.6. 
Transform the aggregated patterns from (3.5) into a single pattern // and prepend it to the aggregated witness report match &mut witness_report { WitnessReport::NoWitnesses => {} witness_report => { let pat_stack = Pattern::from_pat_stack(handler, pat_stack, span)?; witness_report.add_witness(handler, pat_stack, span)? } } // 7. Return the witness report Ok(witness_report) } else { // 4. If it is not a complete signature: // 4.1. Compute the default `Matrix` *D(P)* let d_p = compute_default_matrix(handler, p, q, span)?; // 4.2. Compute *q'* as \[q₂ ... qₙ*\]. let (_, q_rest) = q.split_first(handler, span)?; // 4.3. Recursively compute *U(D(P), q')*. let mut witness_report = is_useful(handler, engines, factory, &d_p, &q_rest, span)?; // 4.4. If Σ is empty, create a pattern not present in Σ let witness_to_add = if sigma.is_empty() { Pattern::Wildcard } else { factory.create_pattern_not_present(handler, engines, sigma, span)? }; // 4.5. Add this new pattern to the resulting witness report match &mut witness_report { WitnessReport::NoWitnesses => {} witness_report => witness_report.add_witness(handler, witness_to_add, span)?, } // 4.6. Return the witness report Ok(witness_report) } } /// Computes a witness report from *U(P, q)* when *q* is a constructed pattern /// *c(r₁, ..., rₐ)*. /// /// Given a specialized `Matrix` that specializes *P* to *c* and another /// specialized `Matrix` that specializes *q* to *c*, recursively compute if the /// latter `Matrix` is useful to the former. /// /// --- /// /// 1. Extract the specialized `Matrix` *S(c, P)* /// 2. Extract the specialized `Matrix` *S(c, q)* /// 3. Recursively compute *U(S(c, P), S(c, q))* fn is_useful_constructed( handler: &Handler, engines: &Engines, factory: &ConstructorFactory, p: &Matrix, q: &PatStack, c: Pattern, span: &Span, ) -> Result<WitnessReport, ErrorEmitted> { // 1. Extract the specialized `Matrix` *S(c, P)* let s_c_p = compute_specialized_matrix(handler, &c, p, q, span)?; // 2. 
Extract the specialized `Matrix` *S(c, q)* let s_c_q = compute_specialized_matrix(handler, &c, &Matrix::from_pat_stack(q.clone()), q, span)?; // *S(c, q)* may have multiple rows in the case of a or pattern // in that case we define: *U(P,((r1∣r2) q2...qn)) = U(P,(r1 q2...qn)) ∨ U(P,(r2 q2...qn))* let mut witness_report = WitnessReport::NoWitnesses; for s_c_q in s_c_q.rows() { // 3. Recursively compute *U(S(c, P), S(c, q))* let wr = is_useful(handler, engines, factory, &s_c_p, s_c_q, span)?; witness_report = WitnessReport::join_witness_reports(witness_report, wr); } Ok(witness_report) } /// Computes a witness report from *U(P, q)* when *q* is an or-pattern /// *(r₁ | ... | rₐ)*. /// /// Compute the witness report for each element of q and aggregate them /// together. /// /// --- /// /// 1. For each *k* 0..*a* compute *q'* as \[*rₖ q₂ ... qₙ*\]. /// 2. Compute the witnesses from *U(P, q')* /// 3. Aggregate the witnesses from every *U(P, q')* fn is_useful_or( handler: &Handler, engines: &Engines, factory: &ConstructorFactory, p: &Matrix, q: &PatStack, pats: PatStack, span: &Span, ) -> Result<WitnessReport, ErrorEmitted> { let (_, q_rest) = q.split_first(handler, span)?; let mut p = p.clone(); let mut witness_report = WitnessReport::NoWitnesses; for pat in pats.into_iter() { // 1. For each *k* 0..*a* compute *q'* as \[*rₖ q₂ ... qₙ*\]. let mut v = PatStack::from_pattern(pat); v.append(&mut q_rest.clone()); // 2. Compute the witnesses from *U(P, q')* let wr = is_useful(handler, engines, factory, &p, &v, span)?; p.push(v); // 3. Aggregate the witnesses from every *U(P, q')* witness_report = WitnessReport::join_witness_reports(witness_report, wr); } Ok(witness_report) } /// Given a `Matrix` *P*, constructs the default `Matrix` *D(P). This is done by /// sequentially computing the rows of *D(P)*. 
/// /// Intuition: A default `Matrix` is a transformation upon *P* that "shrinks" /// the rows of *P* depending on if the row is able to generally match all /// patterns in a default case. fn compute_default_matrix( handler: &Handler, p: &Matrix, q: &PatStack, span: &Span, ) -> Result<Matrix, ErrorEmitted> { let mut d_p = Matrix::empty(); for p_i in p.rows().iter() { d_p.append(&mut compute_default_matrix_row(handler, p_i, q, span)?); } let (m, n) = d_p.m_n(handler, span)?; if m > 0 && n != (q.len() - 1) { return Err(handler.emit_err(CompileError::Internal( "D(P) matrix is misshapen", span.clone(), ))); } Ok(d_p) } /// Given a `PatStack` *pⁱ* from `Matrix` *P*, compute the resulting row of the /// default `Matrix` *D(P)*. /// /// A row in the default `Matrix` "shrinks itself" or "eliminates itself" /// depending on if its possible to make general claims the first element of the /// row *pⁱ₁*. It is possible to make a general claim *pⁱ₁* when *pⁱ₁* is the /// wildcard pattern (in which case it could match anything) and when *pⁱ₁* is /// an or-pattern (in which case we can do recursion while pretending that the /// or-pattern is itself a `Matrix`). A row "eliminates itself" when *pⁱ₁* is a /// constructed pattern (in which case it could only make a specific constructed /// pattern and we could not make any general claims about it). /// /// --- /// /// Rows are defined according to the first component of the row: /// /// 1. *pⁱ₁* is a constructed pattern *c'(r₁, ..., rₐ)*: /// 1. no row is produced /// 2. *pⁱ₁* is a wildcard pattern: /// 1. the resulting row equals \[pⁱ₂ ... pⁱₙ*\] /// 3. *pⁱ₁* is an or-pattern *(r₁ | ... | rₐ)*: /// 1. Construct a new `Matrix` *P'*, where given *k* 0..*a*, the rows of /// *P'* are defined as \[*rₖ pⁱ₂ ... pⁱₙ*\] for every *k*. /// 2. 
The resulting rows are the rows obtained from calling the recursive /// *D(P')* fn compute_default_matrix_row( handler: &Handler, p_i: &PatStack, q: &PatStack, span: &Span, ) -> Result<Vec<PatStack>, ErrorEmitted> { let mut rows: Vec<PatStack> = vec![]; let (p_i_1, mut p_i_rest) = p_i.split_first(handler, span)?; match p_i_1 { Pattern::Wildcard => { // 2. *pⁱ₁* is a wildcard pattern: // 1. the resulting row equals \[pⁱ₂ ... pⁱₙ*\] let mut row = PatStack::empty(); row.append(&mut p_i_rest); rows.push(row); } Pattern::Or(pats) => { // 3. *pⁱ₁* is an or-pattern *(r₁ | ... | rₐ)*: // 1. Construct a new `Matrix` *P'*, where given *k* 0..*a*, the rows of // *P'* are defined as \[*rₖ pⁱ₂ ... pⁱₙ*\] for every *k*. let mut m = Matrix::empty(); for pat in pats.iter() { let mut m_row = PatStack::from_pattern(pat.clone()); m_row.append(&mut p_i_rest.clone()); m.push(m_row); } // 2. The resulting rows are the rows obtained from calling the recursive // *D(P')* let d_p = compute_default_matrix(handler, &m, q, span)?; rows.append(&mut d_p.into_rows()); } // 1. *pⁱ₁* is a constructed pattern *c'(r₁, ..., rₐ)*: // 1. no row is produced _ => {} } Ok(rows) } /// Given a constructor *c* and a `Matrix` *P*, constructs the specialized /// `Matrix` *S(c, P)*. This is done by sequentially computing the rows of /// *S(c, P)*. /// /// Intuition: A specialized `Matrix` is a transformation upon *P* that /// "unwraps" the rows of *P* depending on if they are congruent with *c*. 
fn compute_specialized_matrix( handler: &Handler, c: &Pattern, p: &Matrix, q: &PatStack, span: &Span, ) -> Result<Matrix, ErrorEmitted> { let mut s_c_p = Matrix::empty(); if let Pattern::Or(cpats) = c { for cpat in cpats.iter() { let mut rows = compute_specialized_matrix(handler, cpat, p, q, span)?.into_rows(); s_c_p.append(&mut rows); } return Ok(s_c_p); } for p_i in p.rows().iter() { s_c_p.append(&mut compute_specialized_matrix_row( handler, c, p_i, q, span, )?); } let (m, n) = s_c_p.m_n(handler, span)?; if m > 0 && n != (c.a() + q.len() - 1) { return Err(handler.emit_err(CompileError::Internal( "S(c,P) matrix is misshapen", span.clone(), ))); } Ok(s_c_p) } /// Given a constructor *c* and a `PatStack` *pⁱ* from `Matrix` *P*, compute the /// resulting row of the specialized `Matrix` *S(c, P)*. /// /// Intuition: a row in the specialized [Matrix] "expands itself" or "eliminates /// itself" depending on if its possible to further "drill down" into the /// elements of *P* given a *c* that we are specializing for. It is possible to /// "drill down" when the first element of a row of *P* *pⁱ₁* matches *c* (in /// which case it is possible to "drill down" into the arguments for *pⁱ₁*), /// when *pⁱ₁* is the wildcard case (in which case it is possible to "drill /// down" into "fake" arguments for *pⁱ₁* as it does not matter if *c* matches /// or not), and when *pⁱ₁* is an or-pattern (in which case we can do recursion /// while pretending that the or-pattern is itself a `Matrix`). A row /// "eliminates itself" when *pⁱ₁* does not match *c* (in which case it is not /// possible to "drill down"). /// /// --- /// /// Rows are defined according to the first component of the row: /// /// 1. *pⁱ₁* is a constructed pattern *c'(r₁, ..., rₐ)* where *c* == *c'*: /// 1. the resulting row equals \[*r₁ ... rₐ pⁱ₂ ... pⁱₙ*\] /// 2. *pⁱ₁* is a constructed pattern *c'(r₁, ..., rₐ)* where *c* != *c'*: /// 1. no row is produced /// 3. 
*pⁱ₁* is a wildcard pattern and the number of sub-patterns in *c* is *a*: /// 1. the resulting row equals \[*_₁ ... _ₐ pⁱ₂ ... pⁱₙ*\] /// 4. *pⁱ₁* is an or-pattern *(r₁ | ... | rₐ)*: /// 1. Construct a new `Matrix` *P'* where, given *k* 0..*a*, the rows of /// *P'* are defined as \[*rₖ pⁱ₂ ... pⁱₙ*\] for every *k* /// 2. The resulting rows are the rows obtained from calling the recursive /// *S(c, P')* fn compute_specialized_matrix_row( handler: &Handler, c: &Pattern, p_i: &PatStack, q: &PatStack, span: &Span, ) -> Result<Vec<PatStack>, ErrorEmitted> { let mut rows: Vec<PatStack> = vec![]; let (p_i_1, mut p_i_rest) = p_i.split_first(handler, span)?; match p_i_1 { Pattern::Wildcard => { // 3. *pⁱ₁* is a wildcard pattern and the number of sub-patterns in *c* is *a*: // 3.1. the resulting row equals \[*_₁ ... _ₐ pⁱ₂ ... pⁱₙ*\] let mut row: PatStack = PatStack::fill_wildcards(c.a()); row.append(&mut p_i_rest); rows.push(row); } Pattern::Or(pats) => { // 4. *pⁱ₁* is an or-pattern *(r₁ | ... | rₐ)*: // 4.1. Construct a new `Matrix` *P'* where, given *k* 0..*a*, the rows of // *P'* are defined as \[*rₖ pⁱ₂ ... pⁱₙ*\] for every *k* let mut m = Matrix::empty(); for pat in pats.iter() { let mut m_row = PatStack::from_pattern(pat.clone()); m_row.append(&mut p_i_rest.clone()); m.push(m_row); } // 4.2. The resulting rows are the rows obtained from calling the recursive // *S(c, P')* let s_c_p = compute_specialized_matrix(handler, c, &m, q, span)?; rows.append(&mut s_c_p.into_rows()); } other => { if c.has_the_same_constructor(&other) { // 1. *pⁱ₁* is a constructed pattern *c'(r₁, ..., rₐ)* where *c* == *c'*: // 1.1. the resulting row equals \[*r₁ ... rₐ pⁱ₂ ... pⁱₙ*\] let mut row: PatStack = other.sub_patterns(handler, span)?; row.append(&mut p_i_rest); rows.push(row); } // 2. *pⁱ₁* is a constructed pattern *c'(r₁, ..., rₐ)* where *c* != *c'*: // 2.1. no row is produced } } Ok(rows) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/matrix.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/analysis/matrix.rs
use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::Span; use crate::CompileError; use super::patstack::PatStack; /// A `Matrix` is a `Vec<PatStack>` that is implemented with special methods /// particular to the match exhaustivity algorithm. /// /// The number of rows of the `Matrix` is equal to the number of `PatStack`s and /// the number of columns of the `Matrix` is equal to the number of elements in /// the `PatStack`s. Each `PatStack` should contains the same number of /// elements. #[derive(Clone, Debug)] pub(crate) struct Matrix { rows: Vec<PatStack>, } impl Matrix { /// Creates an empty `Matrix`. pub(crate) fn empty() -> Self { Matrix { rows: vec![] } } /// Creates a `Matrix` with one row from a `PatStack`. pub(crate) fn from_pat_stack(pat_stack: PatStack) -> Self { Matrix { rows: vec![pat_stack], } } /// Pushes a `PatStack` onto the `Matrix`. pub(crate) fn push(&mut self, row: PatStack) { self.rows.push(row); } /// Appends a `Vec<PatStack>` onto the `Matrix`. pub(crate) fn append(&mut self, rows: &mut Vec<PatStack>) { self.rows.append(rows); } /// Returns a reference to the rows of the `Matrix`. pub(crate) fn rows(&self) -> &Vec<PatStack> { &self.rows } /// Returns the rows of the `Matrix`. pub(crate) fn into_rows(self) -> Vec<PatStack> { self.rows } /// Returns the number of rows *m* and the number of columns *n* of the /// `Matrix` in the form (*m*, *n*). pub(crate) fn m_n( &self, handler: &Handler, span: &Span, ) -> Result<(usize, usize), ErrorEmitted> { let first = match self.rows.first() { Some(first) => first, None => return Ok((0, 0)), }; let n = first.len(); for row in self.rows.iter().skip(1) { if row.len() != n { return Err(handler.emit_err(CompileError::Internal( "found invalid matrix size", span.clone(), ))); } } Ok((self.rows.len(), n)) } /// Computes Σ, where Σ is a `PatStack` containing the first element of /// every row of the `Matrix`. 
pub(crate) fn compute_sigma( &self, handler: &Handler, span: &Span, ) -> Result<PatStack, ErrorEmitted> { let mut pat_stack = PatStack::empty(); for row in self.rows.iter() { let first = row.first(handler, span)?; pat_stack.push(first.into_root_constructor()) } Ok(pat_stack.remove_duplicates()) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/typed_match_branch.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/typed_match_branch.rs
use ast_node::expression::match_expression::typed::matcher::{ReqDeclNode, ReqOrVarDecl}; use indexmap::IndexSet; use itertools::{multiunzip, Itertools}; use sway_error::{ error::{CompileError, ShadowingSource}, handler::{ErrorEmitted, Handler}, }; use sway_types::{Ident, Span, Spanned}; use crate::{ compiler_generated::{ generate_matched_or_variant_index_var_name, generate_matched_or_variant_variables_var_name, INVALID_MATCHED_OR_VARIABLE_INDEX_SIGNAL, }, language::{ parsed::MatchBranch, ty::{self, MatchBranchCondition, MatchedOrVariantIndexVars, TyExpression}, }, semantic_analysis::*, Engines, TypeInfo, UnifyCheck, }; use super::{instantiate::Instantiate, matcher::matcher, ReqDeclTree}; impl ty::TyMatchBranch { pub(crate) fn type_check( handler: &Handler, mut ctx: TypeCheckContext, typed_value: &ty::TyExpression, branch: MatchBranch, ) -> Result<(ty::TyMatchBranch, ty::TyScrutinee), ErrorEmitted> { let MatchBranch { scrutinee, result, span: branch_span, } = branch; // For the dummy span of all the instantiated code elements that cannot be mapped on // any of the elements from the original code, we will simply take the span of the // whole match arm. We assume that these spans will never be used. // This is also the error span in case of internal compiler errors. let instantiate = Instantiate::new(ctx.engines, branch_span.clone()); let type_engine = ctx.engines.te(); let engines = ctx.engines(); // Type check the scrutinee. let typed_scrutinee = ty::TyScrutinee::type_check(handler, ctx.by_ref(), scrutinee)?; // Calculate the requirements and variable declarations. let req_decl_tree = matcher( handler, ctx.by_ref(), typed_value, // This is the matched value. It gets propagated unchanged during matching for error reporting purposes. typed_value, // This is the same match value, but this time as the top level expression to be matched. typed_scrutinee.clone(), )?; // Emit errors for eventual multiple definitions of variables. 
// We stop further compilation in case of duplicates in order to // provide guarantee to the desugaring that all the requirements // are satisfied for all of the variables: // - existence in all OR variants with the same type // - no duplicates handler.scope(|handler| { for duplicate in collect_duplicate_match_pattern_variables(&typed_scrutinee) { handler.emit_err(CompileError::MultipleDefinitionsOfMatchArmVariable { match_value: typed_value.span.clone(), match_type: engines.help_out(typed_value.return_type).to_string(), first_definition: duplicate.first_definition.1, first_definition_is_struct_field: duplicate.first_definition.0, duplicate: duplicate.duplicate.1, duplicate_is_struct_field: duplicate.duplicate.0, }); } Ok(()) })?; // Emit errors for eventual uses of configurables in patterns. // Configurables cannot be used in pattern matching, since they are not compile-time // constants. Using a configurable will define a pattern variable, that will // then shadow the configurable of the same name, which is not allowed. // This can be very confusing in case someone tries to use configurables in pattern matching // in the same way as constants. So we provide helpful hints here. // We stop further compilation in case of finding configurables in patterns. handler.scope(|handler| { // All the first occurrences of variables in order of appearance, while respecting // if they are struct field variables. let variables: IndexSet<(Ident, bool)> = IndexSet::from_iter(collect_match_pattern_variables(&typed_scrutinee)); for (ident, is_struct_field) in variables { let default_handler = &Handler::default(); // If there exist a configurable with the same name as the pattern variable. 
if let Ok(ty::TyDecl::ConfigurableDecl(configurable_decl)) = ctx.resolve_symbol(default_handler, &ident) { let name = (&ident).into(); let configurable_span = engines .de() .get_configurable(&configurable_decl.decl_id) .span(); if is_struct_field { handler.emit_err(CompileError::ConfigurablesCannotBeShadowed { shadowing_source: ShadowingSource::PatternMatchingStructFieldVar, name, configurable_span, }); } else { handler.emit_err(CompileError::ConfigurablesCannotBeMatchedAgainst { name, configurable_span, }); } } } Ok(()) })?; let (condition, result_var_declarations, or_variant_vars) = instantiate_branch_condition_result_var_declarations_and_matched_or_variant_index_vars( handler, &mut ctx, &instantiate, &req_decl_tree, )?; // create a new namespace for this branch result ctx.scoped(handler, Some(branch_span.clone()), |scoped_ctx| { // for every variable that comes into result block, create a variable declaration, // insert it into the branch namespace, and add it to the block of code statements let mut code_block_contents: Vec<ty::TyAstNode> = vec![]; for (var_ident, var_body) in result_var_declarations { let var_decl = instantiate.var_decl(var_ident.clone(), var_body.clone()); let _ = scoped_ctx.insert_symbol(handler, var_ident.clone(), var_decl.clone()); code_block_contents.push(ty::TyAstNode { content: ty::TyAstNodeContent::Declaration(var_decl), span: var_ident.span(), }); } // type check the branch result let typed_result = { // If there is an expectation coming from the context via `ctx.type_annotation()` we need // to pass that contextual requirement to the branch in order to provide more specific contextual // information. E.g., that `Option<u8>` is expected. // But at the same time, we do not want to unify during type checking with that contextual information // at this stage, because the branch might get `TypeInfo::Unknown` as the expectation and diverge // at the same time. 
The divergence would unify `TypeInfo::Never` and `Unknown` in that case, leaving // `Never` as the expected type for the subsequent branches. // In order to pass the contextual information, but not to affect the original type with potential // unwanted unification with `Never`, we create a copies of the `ctx.type_annotation()` type and pass // it as the expectation to the branch. let type_annotation = (*type_engine.get(scoped_ctx.type_annotation())).clone(); let branch_ctx = scoped_ctx.by_ref().with_type_annotation(type_engine.insert( engines, type_annotation, None, )); ty::TyExpression::type_check(handler, branch_ctx, &result)? }; // Check if return type is Never if it is we don't unify as it would replace the Unknown annotation with Never. if !matches!(*type_engine.get(typed_result.return_type), TypeInfo::Never) { // unify the return type from the typed result with the type annotation // Note here that the `scoped_ctx` is actually the original `ctx` just scoped // to the `namespace`, thus, having the same original type annotation. // This unification is also the mechanism for carrying the type of a branch to // the subsequent branch. It potentially alters the type behind the `ctx.type_annotation()` // which will then be picked by the next branch. scoped_ctx.unify_with_type_annotation( handler, typed_result.return_type, &typed_result.span, ); } // if the typed branch result is a code block, then add the contents // of that code block to the block of code statements that we are already // generating. if the typed branch result is not a code block, then add // the typed branch result as an ast node to the block of code statements let typed_result_return_type = typed_result.return_type; let typed_result_span = typed_result.span.clone(); match typed_result.expression { ty::TyExpressionVariant::CodeBlock(ty::TyCodeBlock { mut contents, .. 
}) => { code_block_contents.append(&mut contents); } _ => { code_block_contents.push(ty::TyAstNode { content: ty::TyAstNodeContent::Expression(TyExpression { return_type: typed_result_return_type, span: typed_result_span.clone(), expression: ty::TyExpressionVariant::ImplicitReturn(Box::new( typed_result, )), }), span: typed_result_span.clone(), }); } } // assemble a new branch result that includes both the variable declarations // that we create and the typed result from the original untyped branch let new_result = ty::TyExpression { expression: ty::TyExpressionVariant::CodeBlock(ty::TyCodeBlock { contents: code_block_contents, whole_block_span: sway_types::Span::dummy(), }), return_type: typed_result_return_type, span: typed_result_span, }; let typed_branch = ty::TyMatchBranch { matched_or_variant_index_vars: or_variant_vars, condition, result: new_result, span: branch_span, }; Ok((typed_branch, typed_scrutinee)) }) } } type VarDecl = (Ident, ty::TyExpression); /// Declarations of variables that have to be inserted at the beginning /// of the match arm result. /// These can be simple variable declarations in the form `let <ident> = <exp>;` /// or the declarations of tuple variables holding values coming from OR /// variants. In the former case, the variable body can be an arbitrary long /// chain of nested `if` expressions: `let <tuple> = if .. else ..`. type ResultVarDeclarations = Vec<VarDecl>; /// Declarations of variables that are carried over from the lower parts /// of the [ReqDeclTree] towards the upper parts. The decision which of /// those variables should be added to [ResultVarDeclarations] is always /// done at the AND and OR nodes upper in the tree. /// The OR nodes can transform the variables before passing them to the /// upper nodes. type CarryOverVarDeclarations = Vec<VarDecl>; /// Declarations of tuple variables that are carried over from the lower parts /// of the [ReqDeclTree] towards the upper parts. 
The decision which of /// those tuple variables should be added to [ResultVarDeclarations] is always /// done at the AND and OR nodes upper in the tree. /// The OR nodes can embed tuple variables into definitions of other tuple /// variables, thus, not passing them any more to the upper nodes. type CarryOverTupleDeclarations = Vec<VarDecl>; /// Instantiates three artifacts, that are in the end carried over to the typed match expression /// via [ty::TyMatchBranch]: /// - branch condition: Overall condition that must be `true` for the branch to match. /// - result variable declarations: Variable declarations that needs to be added to the /// match branch result, before the actual body. Here we distinguish between the variables /// actually declared in the match arm pattern and so called "tuple variables" that are /// compiler generated and contain values for variables extracted out of individual OR variants. /// - OR variant index variables: Variable declarations that are generated in case of having /// variables in OR patterns. Index variables hold 1-based index of the OR variant being matched /// or zero if non of the OR variants has matched. /// /// ## Algorithm Overview /// The algorithm traverses the `req_decl_tree` bottom up from left to right and collects the /// overall condition, variable declarations, and tuple variable declarations. /// /// In general, if the visited node is not the root node, the variables and requirements encountered /// at that node must be carried over to the upper node that decides how to interpret them. /// /// E.g., if the upper node is an AND node with three sub nodes each having a requirement, the AND /// node will decide to combine the three requirements using the lazy and operator, and to pass only /// the new single requirement to the upper nodes. /// /// Detailed explanation on how the condition and carry over declarations are constructed and /// carried over is given on other implementation functions. 
/// /// Examples of resulting desugared match expressions can be found in the module description ([super]); fn instantiate_branch_condition_result_var_declarations_and_matched_or_variant_index_vars( handler: &Handler, ctx: &mut TypeCheckContext, instantiate: &Instantiate, req_decl_tree: &ReqDeclTree, ) -> Result< ( MatchBranchCondition, ResultVarDeclarations, MatchedOrVariantIndexVars, ), ErrorEmitted, > { let mut result_var_declarations = ResultVarDeclarations::new(); let mut or_variants_index_vars = MatchedOrVariantIndexVars::new(); let (condition, carry_over_var_declarations, carry_over_tuple_declarations) = recursively_instantiate_conditions_declarations_and_variant_index_vars( handler, ctx.by_ref(), instantiate, None, req_decl_tree.root(), &mut result_var_declarations, &mut or_variants_index_vars, )?; // At the end, there must not be any carry-over declarations. // All variable declarations must end up in the `result_var_declarations`. return if !(carry_over_var_declarations.is_empty() && carry_over_tuple_declarations.is_empty()) { Err(handler.emit_err(CompileError::Internal( "unable to extract match arm variables", instantiate.error_span(), ))) } else { Ok((condition, result_var_declarations, or_variants_index_vars)) }; fn recursively_instantiate_conditions_declarations_and_variant_index_vars( handler: &Handler, mut ctx: TypeCheckContext, instantiate: &Instantiate, parent_node: Option<&ReqDeclNode>, req_decl_node: &ReqDeclNode, result_var_declarations: &mut ResultVarDeclarations, or_variants_index_vars: &mut MatchedOrVariantIndexVars, ) -> Result< ( MatchBranchCondition, CarryOverVarDeclarations, CarryOverTupleDeclarations, ), ErrorEmitted, > { return match req_decl_node { ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::Req(req)) => { let condition = instantiate .eq_result(handler, ctx.by_ref(), req.0.clone(), req.1.clone()) .map(Some)?; Ok((condition, vec![], vec![])) } ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::VarDecl(decl)) => { if parent_node.is_none() { // I am 
the root/only node. Add my declaration to the result var declarations and pass no requirements and no carry over vars. result_var_declarations.push(decl.clone()); Ok((None, vec![], vec![])) } else { // I am embedded with an AND or OR node. The parent node needs to decide what to do with my variable declaration. Ok((None, vec![decl.clone()], vec![])) } } ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::Neither) => Ok((None, vec![], vec![])), ReqDeclNode::And(nodes) | ReqDeclNode::Or(nodes) => { instantiate_child_nodes_conditions_and_declarations( handler, ctx.by_ref(), instantiate, req_decl_node, parent_node.is_none(), nodes, result_var_declarations, or_variants_index_vars, ) } }; #[allow(clippy::too_many_arguments)] fn instantiate_child_nodes_conditions_and_declarations( handler: &Handler, mut ctx: TypeCheckContext, instantiate: &Instantiate, parent_node: &ReqDeclNode, parent_node_is_root_node: bool, nodes: &[ReqDeclNode], result_var_declarations: &mut ResultVarDeclarations, or_variant_index_vars: &mut MatchedOrVariantIndexVars, ) -> Result< ( MatchBranchCondition, CarryOverVarDeclarations, CarryOverTupleDeclarations, ), ErrorEmitted, > { let conditions_and_carry_overs: Result<Vec<_>, _> = nodes .iter() .map(|node| { recursively_instantiate_conditions_declarations_and_variant_index_vars( handler, ctx.by_ref(), instantiate, Some(parent_node), node, result_var_declarations, or_variant_index_vars, ) }) .collect(); let (conditions, carry_over_vars, carry_over_tuples): (Vec<_>, Vec<_>, Vec<_>) = multiunzip(conditions_and_carry_overs?); let (condition, vars, tuples) = match parent_node { ReqDeclNode::And(_) => { let conditions = conditions.into_iter().flatten().collect_vec(); let condition = match conditions[..] 
{ [] => None, _ => Some(build_condition_expression(&conditions[..], &|lhs, rhs| { instantiate.lazy_and(lhs, rhs) })), }; let mut vars = carry_over_vars.into_iter().flatten().collect_vec(); let mut tuples = carry_over_tuples.into_iter().flatten().collect_vec(); if parent_node_is_root_node { // We are within an AND root node. Add all the variable declarations to the result var declarations and // return the calculated condition and no carry overs. // `vars` and `tuples` will be empty after appending. // Note that if we have more than one tuple in carry over, this means they // are coming from an AND node (because an OR node always produces a single tuple). // In that case the `vars` redefined in tuples are never the same and we can // safely declare them in any order after the tuples. result_var_declarations.append(&mut tuples); result_var_declarations.append(&mut vars); } // Return the condition and either the empty `vars` and `tuples` if the parent is the root node, or carry over // all the declarations from all the child nodes. (condition, vars, tuples) } ReqDeclNode::Or(_) => { let has_var_decls = carry_over_vars.iter().any(|v| !v.is_empty()); if has_var_decls { // We need to: // - instantiate the index variable for this OR. // - instantiate a single tuple variable that holds the variables taken from the alternatives. // - instantiate redefined declared variables that are initialized from the tuple fields. // Instantiate and return the expression for matched OR variant index variable. let suffix = or_variant_index_vars.len() + 1; let matched_or_variant_index_var_decl = instantiate_matched_or_variant_index_var_expression( instantiate, suffix, conditions, ); // Variable expression used to instantiate the corresponding tuple variable // that will hold matched variant variables. // Note that it is not needed to add the declaration of this variable // to the context in order for the tuple variable to be created. 
let matched_or_variant_index_variable = instantiate.var_exp( matched_or_variant_index_var_decl.0.clone(), matched_or_variant_index_var_decl.1.return_type, ); or_variant_index_vars.push(matched_or_variant_index_var_decl); // Instantiate the tuple variable and the redefined variable declarations // of the variables declared in OR variants. let (tuple, mut redefined_vars) = instantiate_matched_or_variant_vars_expressions( handler, ctx.by_ref(), instantiate, &matched_or_variant_index_variable, suffix, carry_over_vars, carry_over_tuples, )?; // Instantiate the new condition that will be just the check if the 1-based matched variant index is different // then zero. let condition = instantiate.neq_result( handler, ctx.by_ref(), matched_or_variant_index_variable, instantiate.u64_literal(0), )?; if parent_node_is_root_node { // We are within an OR root node. Add the tuple and all the variable declarations to the result var declarations and // return the calculated condition and no carry overs. result_var_declarations.push(tuple); result_var_declarations.append(&mut redefined_vars); (Some(condition), vec![], vec![]) } else { // Return the condition and or carry over the created tuple and // all the redefined variable declarations to the upper nodes. (Some(condition), redefined_vars, vec![tuple]) } } else { // No variable declarations in OR variants. // This also means we don't have tuples because they are created only to extract variables. // In this case we only have to calculate the final condition. let conditions = conditions.into_iter().flatten().collect_vec(); let condition = match conditions[..] 
{ [] => None, _ => Some(build_condition_expression(&conditions[..], &|lhs, rhs| { instantiate.lazy_or(lhs, rhs) })), }; (condition, vec![], vec![]) } } _ => unreachable!("A parent node can only be an AND or an OR node."), }; Ok((condition, vars, tuples)) } fn build_condition_expression( expressions: &[ty::TyExpression], operator: &impl Fn(ty::TyExpression, ty::TyExpression) -> ty::TyExpression, ) -> ty::TyExpression { let (lhs, others) = expressions .split_first() .expect("The slice of requirement expressions must not be empty."); match others { [] => lhs.clone(), _ => operator(lhs.clone(), build_condition_expression(others, operator)), } } /// Instantiates an immutable variable declaration for the variable /// that tracks which of the OR variants got matched, if any. /// If one of the variants match, the variable will be initialized /// to the 1-based index of that variant counted from left to right. /// If none of the variants match the variable will be initialized /// to zero. /// /// ```ignore /// let __matched_or_variant_index_<suffix>: u64 = if <variant_1_condition> { /// 1u64 /// } else if <variant_2_condition> { /// 2u64 /// } else if ... { /// ... /// } else { /// 0u64 /// }; /// ``` fn instantiate_matched_or_variant_index_var_expression( instantiate: &Instantiate, suffix: usize, conditions: Vec<MatchBranchCondition>, ) -> (Ident, ty::TyExpression) { let ident = instantiate.ident(generate_matched_or_variant_index_var_name(suffix)); // Build the expression bottom up by putting the previous if expression into // the else part of the current one. // Note that we do not have any optimizations like removals of `else` in case of `if true`. // Match expression optimizations will be done on IR side. 
let number_of_alternatives = conditions.len(); let mut if_expr = instantiate.code_block_with_implicit_return_u64(0); for (rev_index, condition) in conditions.into_iter().rev().enumerate() { let condition = match condition { Some(condition_exp) => condition_exp, None => instantiate.boolean_literal(true), }; if_expr = ty::TyExpression { expression: ty::TyExpressionVariant::IfExp { condition: Box::new(condition), then: Box::new(instantiate.code_block_with_implicit_return_u64( (number_of_alternatives - rev_index).try_into().unwrap(), )), r#else: Some(Box::new(if_expr)), // Put the previous if into else. }, return_type: instantiate.u64_type(), span: instantiate.dummy_span(), } } (ident, if_expr) } /// Instantiates immutable variable declarations for all the variables /// declared in an OR match expression. /// Choosing the right initialization, the initialization coming from /// the OR variant that actually matched, is done by inspecting /// the result of the corresponding __matched_or_variant_index_<suffix> /// variable. /// /// The function returns: /// - a variable declaration of the tuple variable that holds /// the values of all the variables declared in the OR match expression /// - redefined declarations of each individual variable. /// /// ```ignore /// let __matched_or_variant_variables_<suffix>: <tuple> = if __matched_or_variant_index_<suffix> == 1 { /// <potential tuple declarations carried over from the child nodes> /// /// (<var_1_variant_1_initialization>, ..., <var_n_variant_1_initialization>) /// } else if __match_matched_or_variant_index_<suffix> == 2 { /// <potential tuple declarations carried over from the child nodes> /// /// (<var_1_variant_2_initialization>, ..., <var_n_variant_2_initialization>) /// } else if ... { /// ... /// } else { /// __revert(...) // This should never happen and means internal compiler error. 
/// }; /// /// let <var_1> = __matched_or_variant_variables_<suffix>.0; /// let <var_2> = __matched_or_variant_variables_<suffix>.1; /// ... /// let <var_n> = __matched_or_variant_variables_<suffix>.(n-1); /// ``` fn instantiate_matched_or_variant_vars_expressions( handler: &Handler, mut ctx: TypeCheckContext, instantiate: &Instantiate, matched_or_variant_index_var: &ty::TyExpression, suffix: usize, mut carry_over_vars: Vec<CarryOverVarDeclarations>, carry_over_tuples: Vec<CarryOverTupleDeclarations>, ) -> Result<(VarDecl, Vec<VarDecl>), ErrorEmitted> { let type_engine = ctx.engines.te(); // At this point we have the guarantee that we have: // - exactly the same variables in each of the OR variants // - that variables of the same name are of the same type // - that we do not have duplicates in variable names inside of alternatives // Sort variables in all alternatives by name to get deterministic ordering in the resulting tuple. // Note that the var declarations in match patterns are mutually independent, thus, // we can shuffle their ordering. for vars_in_alternative in carry_over_vars.iter_mut() { vars_in_alternative.sort_by(|(a, _), (b, _)| a.cmp(b)); } // Still, check the above guarantee and emit internal compiler errors if they are not satisfied. check_variables_guarantee( handler, ctx.engines, &carry_over_vars, instantiate.error_span(), )?; // Build the `if-else` chain for the declaration of the tuple variable. // Build it bottom up, means traverse in reverse order. // All variants have same variable types and names, thus we pick them from the first alternative. 
let tuple_field_types = carry_over_vars[0] .iter() .map(|(_, var_body)| var_body.return_type) .collect(); let tuple_type = type_engine.insert_tuple_without_annotations(ctx.engines, tuple_field_types); let variable_names = carry_over_vars[0] .iter() .map(|(ident, _)| ident.clone()) .collect_vec(); // Build the expression bottom up by putting the previous if expression into // the else part of the current one. let number_of_alternatives = carry_over_vars.len(); let mut if_expr = instantiate
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/typed_scrutinee.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/typed_scrutinee.rs
use std::collections::BTreeMap; use ast_elements::{type_argument::GenericTypeArgument, type_parameter::GenericTypeParameter}; use itertools::Itertools; use sway_error::{ error::{CompileError, StructFieldUsageContext}, handler::{ErrorEmitted, Handler}, }; use sway_types::{Ident, Span, Spanned}; use crate::{ decl_engine::{DeclEngineGetParsedDeclId, DeclEngineInsert}, language::{ parsed::*, ty::{self, StructAccessInfo, TyDecl, TyScrutinee, TyStructDecl, TyStructField}, CallPath, CallPathType, }, semantic_analysis::{TypeCheckContext, TypeCheckFinalization, TypeCheckFinalizationContext}, type_system::*, }; impl TyScrutinee { pub(crate) fn type_check( handler: &Handler, mut ctx: TypeCheckContext, scrutinee: Scrutinee, ) -> Result<Self, ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); match scrutinee { Scrutinee::Or { elems, span } => { let mut typed_elems = Vec::with_capacity(elems.len()); for scrutinee in elems { typed_elems.push(ty::TyScrutinee::type_check( handler, ctx.by_ref(), scrutinee, )?); } let typed_scrutinee = ty::TyScrutinee { variant: ty::TyScrutineeVariant::Or(typed_elems), type_id: type_engine.new_unknown(), span, }; Ok(typed_scrutinee) } Scrutinee::CatchAll { span } => { let typed_scrutinee = ty::TyScrutinee { variant: ty::TyScrutineeVariant::CatchAll, // The `span` will mostly point to a "_" in code. However, match expressions // are heavily used in code generation, e.g., to generate code for contract // function selection in the `__entry` and sometimes the span does not point // to a "_". But it is always in the code in which the match expression is. 
type_id: type_engine.new_placeholder(TypeParameter::Type( GenericTypeParameter::new_placeholder( type_engine.new_unknown(), span.clone(), ), )), span, }; Ok(typed_scrutinee) } Scrutinee::Literal { value, span } => { let typed_scrutinee = ty::TyScrutinee { variant: ty::TyScrutineeVariant::Literal(value.clone()), type_id: type_engine.insert(engines, value.to_typeinfo(), span.source_id()), span, }; Ok(typed_scrutinee) } Scrutinee::Variable { name, span } => type_check_variable(handler, ctx, name, span), Scrutinee::StructScrutinee { struct_name, fields, span, } => type_check_struct(handler, ctx, struct_name.suffix, &fields, span), Scrutinee::EnumScrutinee { call_path, value, span, } => type_check_enum(handler, ctx, call_path, *value, span), Scrutinee::AmbiguousSingleIdent(ident) => { let maybe_enum = type_check_enum( &Handler::default(), ctx.by_ref(), CallPath { prefixes: vec![], suffix: ident.clone(), callpath_type: CallPathType::Ambiguous, }, Scrutinee::Tuple { elems: vec![], span: ident.span(), }, ident.span(), ); if maybe_enum.is_ok() { maybe_enum } else { type_check_variable(handler, ctx, ident.clone(), ident.span()) } } Scrutinee::Tuple { elems, span } => type_check_tuple(handler, ctx, elems, span), Scrutinee::Error { err, .. } => Err(err), } } /// Returns true if the [ty::TyScrutinee] consists only of catch-all scrutinee variants, recursively. /// Catch-all variants are .., _, and variables. E.g.: /// /// ```ignore /// (_, x, Point { .. }) /// ``` /// /// An [ty::TyScrutineeVariant::Or] is considered to be catch-all if any of its alternatives /// is a catch-all [ty::TyScrutinee] according to the above definition. E.g.: /// /// ```ignore /// (1, x, Point { x: 3, y: 4 }) | (_, x, Point { .. }) /// ``` /// /// A catch-all [ty::TyScrutinee] matches all the values of its corresponding type. /// /// A scrutinee that matches all the values of its corresponding type but does not /// consists only of catch-all variants will not be considered a catch-all scrutinee. 
/// E.g., although it matches all values of `bool`, this scrutinee is not considered to /// be a catch-all scrutinee: /// /// ```ignore /// true | false /// ``` pub(crate) fn is_catch_all(&self) -> bool { match &self.variant { ty::TyScrutineeVariant::CatchAll => true, ty::TyScrutineeVariant::Variable(_) => true, ty::TyScrutineeVariant::Literal(_) => false, ty::TyScrutineeVariant::Constant { .. } => false, ty::TyScrutineeVariant::StructScrutinee { fields, .. } => fields .iter() .filter_map(|x| x.scrutinee.as_ref()) .all(|x| x.is_catch_all()), ty::TyScrutineeVariant::Or(elems) => elems.iter().any(|x| x.is_catch_all()), ty::TyScrutineeVariant::Tuple(elems) => elems.iter().all(|x| x.is_catch_all()), ty::TyScrutineeVariant::EnumScrutinee { .. } => false, } } } /// Type checks the `name`, assuming that it's either a variable or an ambiguous identifier /// that might be a constant or configurable. fn type_check_variable( handler: &Handler, ctx: TypeCheckContext, name: Ident, span: Span, ) -> Result<ty::TyScrutinee, ErrorEmitted> { let engines = ctx.engines; let type_engine = engines.te(); let decl_engine = engines.de(); let typed_scrutinee = match ctx.resolve_symbol(&Handler::default(), &name).ok() { // If the name represents a constant, then we turn it into a [ty::TyScrutineeVariant::Constant]. Some(ty::TyDecl::ConstantDecl(ty::ConstantDecl { decl_id, .. 
})) => { let constant_decl = (*decl_engine.get_constant(&decl_id)).clone(); let value = match constant_decl.value { Some(ref value) => value, None => { return Err(handler.emit_err(CompileError::Internal( "Constant value does not contain expression", span, ))); } }; let literal = match value.extract_literal_value() { Some(value) => value, None => { return Err(handler.emit_err(CompileError::Unimplemented { feature: "Supporting constant values of this type in patterns".to_string(), help: vec![], span, })); } }; ty::TyScrutinee { type_id: value.return_type, variant: ty::TyScrutineeVariant::Constant(name, literal, constant_decl), span, } } // If the name isn't a constant, we turn it into a [ty::TyScrutineeVariant::Variable]. // // Note that the declaration could be a configurable declaration, [ty::ConfigurableDecl]. // Configurables cannot be matched against, but we do not emit that error here. // That would unnecessary short-circuit the compilation and reduce number of errors // collected. // Rather, we consider the configurable to be a pattern variable declaration, which // strictly speaking it is. Later when checking typed match arm, we will emit // appropriate helpful errors, depending on the exact usage of that configurable. 
_ => ty::TyScrutinee { variant: ty::TyScrutineeVariant::Variable(name), type_id: type_engine.new_unknown(), span, }, }; Ok(typed_scrutinee) } fn type_check_struct( handler: &Handler, mut ctx: TypeCheckContext, struct_name: Ident, fields: &[StructScrutineeField], span: Span, ) -> Result<ty::TyScrutinee, ErrorEmitted> { let engines = ctx.engines; let type_engine = engines.te(); let decl_engine = engines.de(); // find the struct definition from the name let unknown_decl = ctx.resolve_symbol(handler, &struct_name)?; let struct_id = unknown_decl.to_struct_decl(handler, ctx.engines())?; let mut struct_decl = (*decl_engine.get_struct(&struct_id)).clone(); // monomorphize the struct definition ctx.monomorphize( handler, &mut struct_decl, &mut [], BTreeMap::new(), EnforceTypeArguments::No, &struct_name.span(), )?; let (struct_can_be_changed, is_public_struct_access) = StructAccessInfo::get_info(ctx.engines(), &struct_decl, ctx.namespace()).into(); let has_rest_pattern = fields .iter() .any(|field| matches!(field, StructScrutineeField::Rest { .. })); // check for field existence and type check nested scrutinees; short-circuit if there are non-existing fields // TODO: Is short-circuiting really needed or was it more a convenience? In the first implementation // we had a short-circuit on the first error non-existing field and didn't even collecting all errors. 
let mut typed_fields = vec![]; handler.scope(|handler| { for field in fields.iter() { match field { StructScrutineeField::Field { field, scrutinee, span, } => { // ensure that the struct definition has this field let struct_field = match expect_struct_field( &struct_decl, handler, field, has_rest_pattern, is_public_struct_access, ) { Ok(struct_field) => struct_field, Err(_) => continue, }; // type check the nested scrutinee let typed_scrutinee = match scrutinee { None => None, Some(scrutinee) => Some(ty::TyScrutinee::type_check( handler, ctx.by_ref(), scrutinee.clone(), )?), }; typed_fields.push(ty::TyStructScrutineeField { field: field.clone(), scrutinee: typed_scrutinee, span: span.clone(), field_def_name: struct_field.name.clone(), }); } StructScrutineeField::Rest { .. } => {} } } Ok(()) })?; handler.scope(|handler| { // report struct field privacy errors // This check is intentionally separated from checking the field existence and type-checking the scrutinees. // While we could check private field access immediately after finding the field and emit errors, // that would mean short-circuiting in case of privacy issues which we do not want to do. // The consequence is repeating the search for fields here, but the performance penalty is negligible. if is_public_struct_access { for field in fields { match field { StructScrutineeField::Field { field: ref field_name, .. } => { let struct_field = struct_decl .find_field(field_name) .expect("The struct field with the given field name must exist."); if struct_field.is_private() { handler.emit_err(CompileError::StructFieldIsPrivate { field_name: field_name.into(), struct_name: struct_decl.call_path.suffix.clone(), field_decl_span: struct_field.name.span(), struct_can_be_changed, usage_context: StructFieldUsageContext::PatternMatching { has_rest_pattern, }, }); } } StructScrutineeField::Rest { .. 
} => {} } } } // ensure that the pattern uses all fields of the struct unless the rest pattern is present // Here we follow the approach Rust has, and show a dedicated error if only all public fields are // listed, but the mandatory `..` (because of the private fields) is missing because the struct // has private fields and is used outside of its decl module. // Also, in case of privacy issues and mixing public and private fields we list only the public // fields as missing. // The error message in both cases gives adequate explanation how to fix the reported issue. if !has_rest_pattern && (struct_decl.fields.len() != typed_fields.len()) { let all_public_fields_are_matched = struct_decl .fields .iter() .filter(|f| f.is_public()) .all(|f| typed_fields.iter().any(|tf| f.name == tf.field)); let only_public_fields_are_matched = typed_fields .iter() .map(|tf| { struct_decl .find_field(&tf.field) .expect("The struct field with the given field name must exist.") }) .all(|f| f.is_public()); // In the case of public access where all public fields are listed along with some private fields, // we already have an error emitted for those private fields with the detailed, pattern matching related // explanation that proposes using ignore `..`. if !(is_public_struct_access && all_public_fields_are_matched && !only_public_fields_are_matched) { let missing_fields = |only_public: bool| { struct_decl .fields .iter() .filter(|f| !only_public || f.is_public()) .filter(|f| !typed_fields.iter().any(|tf| f.name == tf.field)) .map(|field| field.name.clone()) .collect_vec() }; handler.emit_err( match ( is_public_struct_access, all_public_fields_are_matched, only_public_fields_are_matched, ) { // Public access. Only all public fields are matched. All missing fields are private. // -> Emit error for the mandatory ignore `..`. 
(true, true, true) => { CompileError::MatchStructPatternMustIgnorePrivateFields { private_fields: missing_fields(false), struct_name: struct_decl.call_path.suffix.clone(), struct_decl_span: struct_decl.span(), all_fields_are_private: struct_decl.has_only_private_fields(), span: span.clone(), } } // Public access. All public fields are matched. Some private fields are matched. // -> Do not emit error here because it is already covered when reporting private field. (true, true, false) => { unreachable!("The above if condition eliminates this case.") } // Public access. Some or non of the public fields are matched. Some or none of the private fields are matched. // -> Emit error listing only missing public fields. Recommendation for mandatory use of `..` is already given // when reporting the inaccessible private field. // or // In struct decl module access. We do not distinguish between private and public fields here. // -> Emit error listing all missing fields. (true, false, _) | (false, _, _) => { CompileError::MatchStructPatternMissingFields { missing_fields: missing_fields(is_public_struct_access), missing_fields_are_public: is_public_struct_access, struct_name: struct_decl.call_path.suffix.clone(), struct_decl_span: struct_decl.span(), total_number_of_fields: struct_decl.fields.len(), span: span.clone(), } } }, ); } } Ok(()) })?; let struct_ref = decl_engine.insert( struct_decl, decl_engine.get_parsed_decl_id(&struct_id).as_ref(), ); let typed_scrutinee = ty::TyScrutinee { type_id: type_engine.insert_struct(engines, *struct_ref.id()), span, variant: ty::TyScrutineeVariant::StructScrutinee { struct_ref, fields: typed_fields, instantiation_call_path: CallPath { prefixes: vec![], suffix: struct_name, callpath_type: CallPathType::Ambiguous, }, }, }; return Ok(typed_scrutinee); fn expect_struct_field<'a>( struct_decl: &'a TyStructDecl, handler: &Handler, field_name: &Ident, has_rest_pattern: bool, is_public_struct_access: bool, ) -> Result<&'a TyStructField, 
ErrorEmitted> { match struct_decl.find_field(field_name) { Some(field) => Ok(field), None => Err(handler.emit_err(CompileError::StructFieldDoesNotExist { field_name: field_name.into(), available_fields: struct_decl.accessible_fields_names(is_public_struct_access), is_public_struct_access, struct_name: struct_decl.call_path.suffix.clone(), struct_decl_span: struct_decl.span(), struct_is_empty: struct_decl.is_empty(), usage_context: StructFieldUsageContext::PatternMatching { has_rest_pattern }, })), } } } impl TypeCheckFinalization for TyScrutinee { fn type_check_finalize( &mut self, _handler: &Handler, _ctx: &mut TypeCheckFinalizationContext, ) -> Result<(), ErrorEmitted> { Ok(()) } } fn type_check_enum( handler: &Handler, mut ctx: TypeCheckContext, call_path: CallPath<Ident>, value: Scrutinee, span: Span, ) -> Result<ty::TyScrutinee, ErrorEmitted> { let type_engine = ctx.engines.te(); let decl_engine = ctx.engines.de(); let engines = ctx.engines(); let mut prefixes = call_path.prefixes.clone(); let (callsite_span, enum_id, call_path_decl) = match prefixes.pop() { Some(enum_name) => { let enum_callpath = CallPath { suffix: enum_name, prefixes, callpath_type: call_path.callpath_type, }; // find the enum definition from the name let unknown_decl = ctx.resolve_call_path(handler, &enum_callpath)?; let enum_id = unknown_decl.to_enum_id(handler, ctx.engines())?; (enum_callpath.span(), enum_id, unknown_decl) } None => { // we may have an imported variant let decl = ctx.resolve_call_path(handler, &call_path)?; if let TyDecl::EnumVariantDecl(ty::EnumVariantDecl { enum_ref, .. 
}) = decl.clone() { (call_path.suffix.span(), *enum_ref.id(), decl) } else { return Err(handler.emit_err(CompileError::EnumNotFound { name: call_path.suffix.clone(), span: call_path.suffix.span(), })); } } }; let mut enum_decl = (*decl_engine.get_enum(&enum_id)).clone(); let variant_name = call_path.suffix.clone(); // monomorphize the enum definition ctx.monomorphize( handler, &mut enum_decl, &mut [], BTreeMap::new(), EnforceTypeArguments::No, &callsite_span, )?; // check to see if the variant exists and grab it if it does let variant = enum_decl .expect_variant_from_name(handler, &variant_name) .cloned()?; // type check the nested scrutinee let typed_value = ty::TyScrutinee::type_check(handler, ctx, value)?; let enum_ref = decl_engine.insert(enum_decl, decl_engine.get_parsed_decl_id(&enum_id).as_ref()); let typed_scrutinee = ty::TyScrutinee { variant: ty::TyScrutineeVariant::EnumScrutinee { enum_ref: enum_ref.clone(), variant: Box::new(variant), call_path_decl, value: Box::new(typed_value), instantiation_call_path: call_path, }, type_id: type_engine.insert_enum(engines, *enum_ref.id()), span, }; Ok(typed_scrutinee) } fn type_check_tuple( handler: &Handler, mut ctx: TypeCheckContext, elems: Vec<Scrutinee>, span: Span, ) -> Result<ty::TyScrutinee, ErrorEmitted> { let type_engine = ctx.engines.te(); let engines = ctx.engines(); let mut typed_elems = vec![]; for elem in elems.into_iter() { typed_elems.push( match ty::TyScrutinee::type_check(handler, ctx.by_ref(), elem) { Ok(res) => res, Err(_) => continue, }, ); } let type_id = type_engine.insert_tuple( engines, typed_elems .iter() .map(|elem| GenericTypeArgument { type_id: elem.type_id, initial_type_id: elem.type_id, span: elem.span.clone(), call_path_tree: None, }) .collect(), ); let typed_scrutinee = ty::TyScrutinee { variant: ty::TyScrutineeVariant::Tuple(typed_elems), type_id, span, }; Ok(typed_scrutinee) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/mod.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/mod.rs
//! This module type checks `match` expressions and desugars `match` expressions to `if` expressions. //! The desugaring does not provides any kind of optimizations. It provides a structure that //! can later on be used for code analysis by reusing the existing analysis available for `if` expressions. //! The optimizations will be done on the IR level. //! //! ## Type Checking //! //! The central module for type checking is the [matcher]. //! //! The [matcher::matcher] function will type check the matched value with the match arm pattern (scrutinee). //! Successful type check will result in an [matcher::ReqDeclTree] that accurately represents all the //! requirements and variable declarations given by the scrutinee pattern. //! //! The resulting [matcher::ReqDeclTree] will be given over to [crate::ty::TyMatchBranch] for additional //! type checking. E.g., checking for duplicates in declared variables is done on this stage. //! //! ## Desugaring //! //! Desugaring to `if` expressions starts in the [crate::ty::TyMatchBranch] where three artifacts are provided //! for a particular match branch (arm): //! - branch condition: Overall condition that must be `true` for the branch to match. //! - result variable declarations: Variable declarations that needs to be added to the //! match branch result, before the actual body. Here we distinguish between the variables //! actually declared in the match arm pattern and so called "tuple variables" that are //! compiler generated and contain values for variables extracted out of individual OR variants. //! - OR variant index variables: Variable declarations that are generated in case of having //! variables in OR patterns. Index variables hold 1-based index of the OR variant being matched //! or zero if non of the OR variants has matched. //! //! Afterwards, these three artifacts coming from every individual branch are glued together in the //! [crate::ty::TyMatchExpression] to form the final desugaring. //! //! 
The desugared `if-else` chains end either in an `else` that contains the result of the last match arm, if the //! match arm is a catch-all arm, or in a `__revert(...)` call with the dedicated revert code. //! These reverts can happen only if we have bugs in the implementation of match expressions and //! is the only safe way to communicate compiler bug detectable only at runtime. //! //! ## Desugaring Examples //! //! The easiest way to explain the desugaring algorithm is to take a look at a few examples of //! different kinds of match arm patterns, and how they are desugared. //! //! Applying the rules sketched below recursively, we can desugar an arbitrary match arm pattern. //! //! ### Literals, Constants, and Variables //! //! In case of literals, constants, and variables the desugared `if` expression is straightforward. //! //! ```ignore //! match exp { //! 1 => 111, //! CONST_X => 222, //! x => x + x, //! } //! ``` //! ```ignore //! let __matched_value_1 = exp; //! if __matched_value_1 == 1 { //! 111 //! } //! else if __matched_value_1 == CONST_X { //! 222 //! } //! else { //! let x = __matched_value_1; //! x + x //! } //! else { //! __revert(14757395258967588866) //! } //! ``` //! //! If the last match arm is not a catch-all arm, the `if-else` chain will end in a `__revert()`. //! //! ```ignore //! match exp { //! true => 111, //! false => 222, //! } //! ``` //! ```ignore //! let __matched_value_1 = exp; //! if __matched_value_1 == true { //! 111 //! } //! else if __matched_value_1 == false { //! 222 //! } //! else { //! __revert(14757395258967588866) //! } //! ``` //! //! ### Structs, Enums, Tuples //! //! In case of structs, enums, and tuples the overall requirement becomes the lazy AND of //! all requirements, and all the variables get extracted. //! //! The construction of the match arm condition and the extraction of variables works //! recursively in case of nested structures. E.g., if we have struct fields being enums //! of tuples of structs etc. //! 
//! But the resulting condition will always contain only the lazy AND operator and all the //! variable definitions will be listed at the top of the match arm result. //! //! ```ignore //! struct Point { //! x: u64, //! y: u64 //! z: u64 //! } //! //! match p { //! Point { x: a, y: 22, z: 33 } => { a }, //! Point { x: 11, y, z: 33} => { y }, //! Point { z, .. } => { z }, //! } //! ``` //! ```ignore //! let __matched_value_1 = p; //! if __matched_value_1.y == 22 && __matched_value_1.z == 33 { //! let a = __matched_value_1.x; //! a //! } //! if __matched_value_1.x == 11 && __matched_value_1.z == 33 { //! let y = __matched_value_1.y; //! y //! } //! else { //! let z = __matched_value_1.z; //! z //! } //! ``` //! //! ### Or Patterns //! //! In case of or patterns without variables, the resulting desugaring is again straightforward. //! We simply construct the overall condition by using the lazy OR operator. //! //! ```ignore //! match exp { //! 1 | 2 => 111, //! CONST_X | CONST_Y => 222, //! x => x + x, //! } //! ``` //! ```ignore //! let __matched_value_1 = exp; //! if __matched_value_1 == 1 || __matched_value_1 == 2 { //! 111 //! } //! else if __matched_value_1 == CONST_X || __matched_value_1 == CONST_Y { //! 222 //! } //! else { //! let x = __matched_value_1; //! x + x //! } //! ``` //! //! In case of having or patterns with variables, the desugaring pattern gets more complex. //! Essentially, we have to extract the variables exactly from the variant that has matched. //! Also, we want to check the conditions for every variant exactly once. //! //! To accomplish this, we move the checking of variants outside of the match arm `if` and //! track the 1-based index of the matched variant in a so called "matched or variant index variable". //! If no variant matches this variable will be set to zero. //! //! We create such "matched or variant index variable" for every or pattern with variables that we //! encounter in the match arm pattern. //! //! 
Afterwards, in the match arm `if` condition we just check if the index variable is different then //! zero which means there is a match. //! //! To properly extract the variables, in the result, we again check which variant has matched and //! store all the variables from that variant in a tuple variable called "matched or variants variables". //! In these tuple variables, the values of the declared variables are stored ordered by the variable name. //! We can safely do this, knowing that at this point we have fully valid variables, e.g., no duplicates. //! //! The final definition of the variables declared in the or pattern is then tuple access to the //! element of the tuple that holds the value of that particular variable. //! //! ```ignore //! enum Enum { //! A: (u64, u64, u64), //! B: (u64, u64, u64), //! C: (u64, u64, u64), //! } //! //! match e { //! Enum::A((y, _, x)) | Enum::B((_, x, y)) | Enum::C((x, _, y)) => x + y, //! }; //! ``` //! ```ignore //! let __matched_value_1 = e; //! { //! let __matched_or_variant_index_1 = if __matched_value_1 is Enum::A { //! 1 // First OR variant matches. //! } //! else if __matched_value_1 is Enum::B { //! 2 // Second OR variant matches. //! } //! else if __matched_value_1 is Enum::C { //! 3 // Third OR variant matches. //! } //! else { //! 0 // None of the variants matches. //! }; // //! if __matched_or_variant_index_1 != 0 { // If any of the variants has matched, means if the arm matches. //! // Store the values of the variables in a tuple, ordered alphabetically by the variable name. //! let __matched_or_variant_variables_1 = if __matched_or_variant_index_1 == 1 { //! // If the first OR variant has matched. //! ((__matched_value_1 as A: (u64, u64, u64)).2, // Take x from the third (2) element of Enum::A. //! (__matched_value_1 as A: (u64, u64, u64)).0) // Take y from the first (0) element of Enum::A. //! } //! else if __matched_or_variant_index_1 == 2 { //! // If the second OR variant has matched. //! 
((__matched_value_1 as B: (u64, u64, u64)).1, // Take x from the second (1) element of Enum::B. //! (__matched_value_1 as B: (u64, u64, u64)).2) // Take y from the third (2) element of Enum::B. //! } //! else if __matched_or_variant_index_1 == 3 { //! // If the third OR variant has matched. //! ((__matched_value_1 as C: (u64, u64, u64)).0, // Take x from the first (0) element of Enum::C. //! (__matched_value_1 as C: (u64, u64, u64)).2) // Take y from the third (2) element of Enum::C. //! } //! else { //! __revert(14757395258967588865) //! }; //! //! // Finally, define the declared variable x and y to take their values from the tuple. //! let x = __matched_or_variant_variables_1.0; //! let y = __matched_or_variant_variables_1.1; //! //! x + y //! } //! else { //! __revert(14757395258967588866) //! } //!} //! ``` //! //! In the case of nested OR patterns, there will be a one `__matched_or_variant_index_<unique suffix>` variable for //! every encountered OR pattern and they will all be listed above the match arm `if` expression. //! Also, in that case, the `if-else` definitions of `__matched_or_variant_variables_<unique suffix>` variables will //! be contained within the `if-else` definitions of their parent `__matched_or_variant_variables_<unique suffix>` variables. //! //! For the record, an alternative approach was also considered, in which the tuple variables are declared immediately //! during the check if variants match. Such tuples would carry a boolean field to communicate if there was a match and //! in case of a non-match the last tuple would have dummy values from the previous one. This would save us double checking //! which variant has match, but would mean always instantiating a tuple that is not needed in a case of non-match. //! In this trade-off we went for the option explained above. //! Note that we will anyhow optimize match expressions on the IR level. 
mod instantiate; mod matcher; mod typed_match_branch; mod typed_match_expression; mod typed_scrutinee; use matcher::ReqDeclTree;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/typed_match_expression.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/typed_match_expression.rs
use crate::{ compiler_generated::INVALID_DESUGARED_MATCHED_EXPRESSION_SIGNAL, language::{ parsed::*, ty::{self, TyExpression}, }, semantic_analysis::{ ast_node::expression::typed_expression::instantiate_if_expression, expression::match_expression::typed::instantiate::Instantiate, TypeCheckContext, }, CompileError, TypeId, }; use std::ops::ControlFlow; use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::{Span, Spanned}; // Trie for matching strings is disabled as it is not generating the best possible code. // Sometimes mac=tching by the trie is actually worse than comparing each individually // Enable this to see a pseudo-code printed to understand what is being generated. // const RADIX_TRIE_DEBUG: bool = false; // #[derive(Default, Debug, Clone)] // struct TrieNode { // output: Option<usize>, // previous: Option<usize>, // next: BTreeMap<String, usize>, // } // struct Trie { // nodes: Vec<TrieNode>, // } // fn revert(type_engine: &TypeEngine) -> TyExpression { // TyExpression { // expression: TyExpressionVariant::IntrinsicFunction(TyIntrinsicFunctionKind { // kind: sway_ast::Intrinsic::Revert, // arguments: vec![TyExpression { // expression: TyExpressionVariant::Literal(crate::language::Literal::U64(17)), // return_type: type_engine.id_of_u64(), // span: Span::dummy(), // }], // type_arguments: vec![], // span: Span::dummy(), // }), // return_type: type_engine.id_of_never(), // span: Span::dummy(), // } // } impl ty::TyMatchExpression { pub(crate) fn type_check( handler: &Handler, ctx: TypeCheckContext, typed_value: ty::TyExpression, branches: Vec<MatchBranch>, span: Span, ) -> Result<(ty::TyMatchExpression, Vec<ty::TyScrutinee>), ErrorEmitted> { // type check all of the branches let mut typed_branches = vec![]; let mut typed_scrutinees = vec![]; let mut ctx = ctx.with_help_text("all branches of a match statement must return the same type"); handler.scope(|handler| { for branch in branches.into_iter() { let (typed_branch, typed_scrutinee) = match 
ty::TyMatchBranch::type_check( handler, ctx.by_ref(), &typed_value, branch, ) { Ok(res) => res, Err(_) => continue, }; typed_branches.push(typed_branch); typed_scrutinees.push(typed_scrutinee); } Ok(()) })?; let typed_exp = ty::TyMatchExpression { value_type_id: typed_value.return_type, branches: typed_branches, return_type_id: ctx.type_annotation(), span, }; Ok((typed_exp, typed_scrutinees)) } pub(crate) fn desugar( self, handler: &Handler, ctx: TypeCheckContext, ) -> Result<ty::TyExpression, ErrorEmitted> { let instantiate = Instantiate::new(ctx.engines, self.span.clone()); if self.branches.is_empty() { return Self::instantiate_if_expression_for_empty_match_expression( handler, ctx, &instantiate, self.value_type_id, self.return_type_id, self.span.clone(), ); } let typed_if_exp = handler .scope(|handler| self.desugar_to_typed_if_expression(instantiate, ctx, handler))?; Ok(typed_if_exp) } // fn desugar_to_radix_trie( // &self, // mut ctx: TypeCheckContext<'_>, // ) -> Result<TyExpression, ErrorEmitted> { // let type_engine = ctx.engines.te(); // let branch_return_type_id = self // .branches // .iter() // .map(|x| x.result.return_type) // .next() // .unwrap(); // let matched_value = self // .branches // .iter() // .flat_map(|x| match &x.condition.as_ref().map(|x| &x.expression) { // Some(TyExpressionVariant::FunctionApplication { arguments, .. }) => { // Some(&arguments[0].1) // } // _ => None, // }) // .next() // .unwrap(); // // the block for the wildcard arm // let wildcard_return_expr = self // .branches // .iter() // .filter(|x| x.condition.is_none()) // .map(|x| x.result.clone()) // .next() // .unwrap_or_else(|| revert(type_engine)); // // All the match string slices, ignoring the wildcard // let match_arms_string_slices = self // .branches // .iter() // .flat_map(|x| match &x.condition.as_ref().map(|x| &x.expression) { // Some(TyExpressionVariant::FunctionApplication { arguments, .. 
}) => { // match &arguments[1].1.expression { // TyExpressionVariant::Literal(crate::language::Literal::String(v)) => { // Some(v.as_str().to_string()) // } // _ => None, // } // } // _ => None, // }) // .collect::<Vec<_>>(); // // group match arms by size of the arm string slice // let match_arms_by_size = match_arms_string_slices.iter().enumerate().fold( // BTreeMap::<usize, Vec<(String, usize)>>::new(), // |mut map, (i, item)| { // map.entry(item.len()).or_default().push((item.clone(), i)); // map // }, // ); // // create and compress all tries. One per arm size // let tries = match_arms_by_size // .values() // .map(|branches| self.generate_radix_trie(branches).unwrap()) // .collect::<Vec<Trie>>(); // // Navigate all valid nodes and collect string pieces. // // Then pack them starting from the biggest. // let mut string_pieces = tries // .iter() // .flat_map(|x| x.nodes.iter()) // .flat_map(|x| x.next.keys().cloned()) // .collect::<Vec<String>>(); // string_pieces.sort_by(|l, r| l.len().cmp(&r.len()).reverse()); // let packed_strings = string_pieces // .into_iter() // .fold(String::new(), |mut pack, item| { // if !pack.contains(&item) { // pack.push_str(&item); // } // pack // }); // if RADIX_TRIE_DEBUG { // println!("let packed_string = {packed_strings:?}"); // } // // Now create the outer expression checking the size of the string slice // let mut block = wildcard_return_expr.clone(); // for ((k, _), trie) in match_arms_by_size.into_iter().zip(tries.into_iter()) { // if RADIX_TRIE_DEBUG { // println!("if str.len() == {k}"); // } // let expression = TyExpressionVariant::AsmExpression { // registers: vec![ // TyAsmRegisterDeclaration { // name: Ident::new_no_span("is_eq".into()), // initializer: None, // }, // TyAsmRegisterDeclaration { // name: Ident::new_no_span("slice".into()), // initializer: Some(matched_value.clone()), // }, // TyAsmRegisterDeclaration { // name: Ident::new_no_span("len".into()), // initializer: None, // }, // TyAsmRegisterDeclaration { // 
name: Ident::new_no_span("expected_len".into()), // initializer: Some(TyExpression { // expression: TyExpressionVariant::Literal( // crate::language::Literal::U64(k as u64), // ), // return_type: type_engine.id_of_u64(), // span: Span::dummy(), // }), // }, // ], // body: vec![ // AsmOp { // op_name: Ident::new_no_span("lw".into()), // op_args: vec![ // BaseIdent::new_no_span("len".into()), // BaseIdent::new_no_span("slice".into()), // ], // immediate: Some(BaseIdent::new_no_span("i1".into())), // span: Span::dummy(), // }, // AsmOp { // op_name: Ident::new_no_span("eq".into()), // op_args: vec![ // BaseIdent::new_no_span("is_eq".into()), // BaseIdent::new_no_span("len".into()), // BaseIdent::new_no_span("expected_len".into()), // ], // immediate: None, // span: Span::dummy(), // }, // ], // returns: Some(( // AsmRegister { // name: "is_eq".into(), // }, // Span::dummy(), // )), // whole_block_span: self.span.clone(), // }; // let then_node = self // .generate_radix_tree_checks( // ctx.by_ref(), // matched_value, // branch_return_type_id, // wildcard_return_expr.clone(), // trie, // &packed_strings, // ) // .unwrap(); // block = TyExpression { // expression: TyExpressionVariant::IfExp { // condition: Box::new(TyExpression { // expression, // return_type: type_engine.id_of_bool(), // span: self.span.clone(), // }), // then: Box::new(then_node), // r#else: Some(Box::new(block)), // }, // return_type: branch_return_type_id, // span: self.span.clone(), // }; // } // if RADIX_TRIE_DEBUG { // println!("return wildcard branch"); // } // Ok(block) // } // fn generate_radix_trie(&self, branches: &[(String, usize)]) -> Result<Trie, ErrorEmitted> { // let mut nodes = vec![TrieNode::default()]; // for (b, i) in branches.iter() { // let mut current = 0; // for c in b.chars() { // let c = c.to_string(); // if let Some(next) = nodes[current].next.get(&c) { // current = *next; // continue; // } // let next = nodes.len(); // nodes[current].next.insert(c, next); // current = next; 
// nodes.push(TrieNode::default()); // } // nodes[current].output = Some(*i); // } // // compress trie // let mut q = vec![0]; // while let Some(i) = q.pop() { // let mut current = nodes[i].clone(); // if current.next.len() == 1 { // let edge = current.next.pop_first().unwrap(); // let mut next = nodes[edge.1].clone(); // if next.next.len() == 1 { // let next_edge = next.next.pop_first().unwrap(); // let compressed_key = format!("{}{}", edge.0, next_edge.0); // nodes[i].next.clear(); // nodes[i].next.insert(compressed_key, next_edge.1); // nodes[i].output = next.output.take(); // q.push(i); // } else { // nodes[edge.1].previous = Some(i); // q.push(edge.1); // } // } else { // for (_, v) in current.next.iter() { // nodes[*v].previous = Some(i); // q.push(*v); // } // } // } // Ok(Trie { nodes }) // } // #[allow(clippy::too_many_arguments)] // fn generate_radix_tree_checks( // &self, // ctx: TypeCheckContext<'_>, // matched_value: &TyExpression, // branch_return_type_id: TypeId, // wildcard_return_expr: TyExpression, // trie: Trie, // packed_strings: &str, // ) -> Result<TyExpression, ErrorEmitted> { // let type_engine = ctx.engines.te(); // let packed_strings_expr = TyExpression { // expression: TyExpressionVariant::Literal(crate::language::Literal::String( // Span::from_string(packed_strings.to_string()), // )), // return_type: type_engine.id_of_string_slice(), // span: Span::dummy(), // }; // let expr = self.generate_radrix_trie_code( // matched_value, // packed_strings, // &packed_strings_expr, // &trie.nodes, // 0, // 0, // type_engine.id_of_bool(), // type_engine.id_of_u64(), // branch_return_type_id, // 1, // wildcard_return_expr, // ); // Ok(expr) // } // #[allow(clippy::too_many_arguments)] // fn generate_radrix_trie_code( // &self, // matched_value: &TyExpression, // packed_strings: &str, // packed_strings_expr: &TyExpression, // nodes: &[TrieNode], // slice_pos: usize, // current_node_index: usize, // bool_type_id: TypeId, // u64_type_id: TypeId, // 
branch_return_type_id: TypeId, // depth: usize, // block_when_all_fail: TyExpression, // ) -> TyExpression { // let current = &nodes[current_node_index]; // if let Some(output) = current.output { // assert!(current.next.is_empty()); // if RADIX_TRIE_DEBUG { // println!("{}return branch {:?}", " ".repeat(depth * 4), output); // } // let branch = &self.branches[output]; // return branch.result.clone(); // } // let mut block = block_when_all_fail.clone(); // for (prefix, next_node_index) in current.next.iter().rev() { // let start = current_node_index; // let end = current_node_index + prefix.len(); // let eq_len: u64 = end as u64 - start as u64; // let prefix_pos = packed_strings // .find(prefix) // .expect("prefix should be inside this string"); // if RADIX_TRIE_DEBUG { // println!( // "{}if str[{start}..{end}] == \"{prefix}\" at packed_string[{prefix_pos}]", // " ".repeat(depth * 4), // ); // } // let then_node = self.generate_radrix_trie_code( // matched_value, // packed_strings, // packed_strings_expr, // nodes, // end, // *next_node_index, // bool_type_id, // u64_type_id, // branch_return_type_id, // depth + 1, // block_when_all_fail.clone(), // ); // let prefix_pos = packed_strings // .find(prefix) // .expect("prefix should be inside this string"); // let expression = TyExpressionVariant::AsmExpression { // registers: vec![ // TyAsmRegisterDeclaration { // name: Ident::new_no_span("slice".into()), // initializer: Some(matched_value.clone()), // }, // TyAsmRegisterDeclaration { // name: Ident::new_no_span("prefix".into()), // initializer: Some(packed_strings_expr.clone()), // }, // TyAsmRegisterDeclaration { // name: Ident::new_no_span("slice_ptr".into()), // initializer: None, // }, // TyAsmRegisterDeclaration { // name: Ident::new_no_span("prefix_ptr".into()), // initializer: None, // }, // TyAsmRegisterDeclaration { // name: Ident::new_no_span("len".into()), // initializer: Some(TyExpression { // expression: TyExpressionVariant::Literal( // 
crate::language::Literal::U64(eq_len), // ), // return_type: u64_type_id, // span: Span::dummy(), // }), // }, // TyAsmRegisterDeclaration { // name: Ident::new_no_span("is_eq".into()), // initializer: None, // }, // ], // body: vec![ // AsmOp { // op_name: Ident::new_no_span("lw".into()), // op_args: vec![ // BaseIdent::new_no_span("slice_ptr".into()), // BaseIdent::new_no_span("slice".into()), // ], // immediate: Some(BaseIdent::new_no_span("i0".into())), // span: Span::dummy(), // }, // AsmOp { // op_name: Ident::new_no_span("addi".into()), // op_args: vec![ // BaseIdent::new_no_span("slice_ptr".into()), // BaseIdent::new_no_span("slice_ptr".into()), // ], // immediate: Some(BaseIdent::new_no_span(format!("i{slice_pos}"))), // span: Span::dummy(), // }, // AsmOp { // op_name: Ident::new_no_span("lw".into()), // op_args: vec![ // BaseIdent::new_no_span("prefix_ptr".into()), // BaseIdent::new_no_span("prefix".into()), // ], // immediate: Some(BaseIdent::new_no_span("i0".into())), // span: Span::dummy(), // }, // AsmOp { // op_name: Ident::new_no_span("addi".into()), // op_args: vec![ // BaseIdent::new_no_span("prefix_ptr".into()), // BaseIdent::new_no_span("prefix_ptr".into()), // ], // immediate: Some(BaseIdent::new_no_span(format!("i{prefix_pos}"))), // span: Span::dummy(), // }, // AsmOp { // op_name: Ident::new_no_span("meq".into()), // op_args: vec![ // BaseIdent::new_no_span("is_eq".into()), // BaseIdent::new_no_span("slice_ptr".into()), // BaseIdent::new_no_span("prefix_ptr".into()), // BaseIdent::new_no_span("len".into()), // ], // immediate: None, // span: Span::dummy(), // }, // ], // returns: Some(( // AsmRegister { // name: "is_eq".into(), // }, // Span::dummy(), // )), // whole_block_span: Span::dummy(), // }; // block = TyExpression { // expression: TyExpressionVariant::IfExp { // condition: Box::new(TyExpression { // expression, // return_type: bool_type_id, // span: Span::dummy(), // }), // then: Box::new(then_node), // r#else: 
Some(Box::new(block)), // }, // return_type: branch_return_type_id, // span: Span::dummy(), // }; // } // if RADIX_TRIE_DEBUG { // println!("{}return wildcard branch", " ".repeat(depth * 4),); // } // block // } fn desugar_to_typed_if_expression( &self, instantiate: Instantiate, mut ctx: TypeCheckContext<'_>, handler: &Handler, ) -> Result<TyExpression, ErrorEmitted> { // The typed if expression object that we will be building on to. // We will do it bottom up, starting from the final `else`. let mut typed_if_exp = None; // For every branch, bottom-up, means in reverse. for ty::TyMatchBranch { matched_or_variant_index_vars, condition, result, span: branch_span, .. } in self.branches.iter().rev() { if let ControlFlow::Break(_) = self.convert_to_typed_if_expression_inner_branch( branch_span.clone(), &mut typed_if_exp, condition, result, &instantiate, &mut ctx, handler, matched_or_variant_index_vars, )? { continue; } } Ok(typed_if_exp.expect("The expression exists because we have at least one branch.")) } #[allow(clippy::too_many_arguments)] fn convert_to_typed_if_expression_inner_branch( &self, branch_span: Span, typed_if_exp: &mut Option<TyExpression>, condition: &Option<TyExpression>, result: &TyExpression, instantiate: &Instantiate, ctx: &mut TypeCheckContext<'_>, handler: &Handler, matched_or_variant_index_vars: &Vec<(sway_types::BaseIdent, TyExpression)>, ) -> Result<ControlFlow<()>, ErrorEmitted> { if typed_if_exp.is_none() { // If the last match arm is a catch-all arm make its result the final else. // Note that this will always be the case with `if let` expressions that // desugar to match expressions. if condition.is_none() { *typed_if_exp = Some(result.clone()); return Ok(ControlFlow::Break(())); // Last branch added, move to the previous one. } else { // Otherwise instantiate the final `__revert`. 
let final_revert = instantiate.code_block_with_implicit_return_revert( INVALID_DESUGARED_MATCHED_EXPRESSION_SIGNAL, ); *typed_if_exp = Some(final_revert); // Continue with adding the last branch. }; } let mut ctx = ctx.by_ref().with_type_annotation(self.return_type_id); ctx.scoped(handler, Some(branch_span), |branch_ctx| { let result_span = result.span.clone(); let condition = condition .clone() .unwrap_or(instantiate.boolean_literal(true)); let if_exp = match instantiate_if_expression( handler, branch_ctx.by_ref(), condition, result.clone(), Some( typed_if_exp .clone() .expect("The previously created expression exist at this point."), ), // Put the previous if into else. result_span.clone(), ) { Ok(if_exp) => if_exp, Err(_) => { return Ok(ControlFlow::Break(())); } }; // If we are instantiating the final `else` block. // Create a new namespace for this branch result. *typed_if_exp = if matched_or_variant_index_vars.is_empty() { // No OR variants with vars. We just have to instantiate the if expression. Some(if_exp) } else { // We have matched OR variant index vars. // We need to add them to the block before the if expression. // The resulting `typed_if_exp` in this case is actually not // an if expression but rather a code block. 
let mut code_block_contents: Vec<ty::TyAstNode> = vec![]; for (var_ident, var_body) in matched_or_variant_index_vars { let var_decl = instantiate.var_decl(var_ident.clone(), var_body.clone()); let span = var_ident.span(); let _ = branch_ctx.insert_symbol(handler, var_ident.clone(), var_decl.clone()); code_block_contents.push(ty::TyAstNode { content: ty::TyAstNodeContent::Declaration(var_decl), span, }); } code_block_contents.push(ty::TyAstNode { content: ty::TyAstNodeContent::Expression(TyExpression { return_type: if_exp.return_type, span: if_exp.span.clone(), expression: ty::TyExpressionVariant::ImplicitReturn(Box::new(if_exp)), }), span: result_span.clone(), }); Some(ty::TyExpression { expression: ty::TyExpressionVariant::CodeBlock(ty::TyCodeBlock { whole_block_span: Span::dummy(), contents: code_block_contents, }), return_type: self.return_type_id, span: result_span.clone(), }) }; Ok(ControlFlow::Continue(())) }) } fn instantiate_if_expression_for_empty_match_expression( handler: &Handler, ctx: TypeCheckContext, instantiate: &Instantiate, value_type_id: TypeId, return_type_id: TypeId, span: Span, ) -> Result<ty::TyExpression, ErrorEmitted> { let type_engine = ctx.engines.te(); let decl_engine = ctx.engines.de(); // An empty match expression can happen only if the type we // are matching on does not have a valid constructor. // Otherwise, the match expression must be exhaustive, means // it must have at least one match arm. // In this case, we manually create a typed expression that is equivalent to // `if true { implicit_return }` where the implicit_return type is manually set // to be the return type of this typed match expression object. // // An example of such matching is when matching an empty enum. // For an example, see the "match_expressions_empty_enums" test. // // NOTE: This manual construction of the expression can (and // most likely will) lead to an otherwise improperly typed // expression, in most cases. 
if !type_engine .get(value_type_id) .has_valid_constructor(decl_engine) { let condition = instantiate.boolean_literal(true); let then_exp = ty::TyExpression { expression: ty::TyExpressionVariant::Tuple { fields: vec![] }, return_type: return_type_id, span: instantiate.dummy_span(), }; let inner_exp = ty::TyExpressionVariant::IfExp { condition: Box::new(condition), then: Box::new(then_exp.clone()), r#else: Option::Some(Box::new(then_exp)), }; let typed_if_exp = ty::TyExpression { expression: inner_exp, return_type: return_type_id, span: instantiate.dummy_span(), }; return Ok(typed_if_exp); } Err(handler.emit_err(CompileError::Internal( "unable to convert match exp to if exp", span, ))) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/matcher.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/matcher.rs
use indexmap::IndexMap; use crate::{ language::{ ty::{self, TyConstantDecl}, CallPath, Literal, }, semantic_analysis::{ ast_node::expression::typed_expression::{ instantiate_enum_unsafe_downcast, instantiate_struct_field_access, instantiate_tuple_index_access, }, TypeCheckContext, }, Ident, TypeId, UnifyCheck, }; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::{span::Span, Named, Spanned}; /// A single requirement in the form `<lhs> == <rhs>` that has to be /// fulfilled for the match arm to match. pub(super) type MatchReq = (ty::TyExpression, ty::TyExpression); /// A single variable in the form `let <ident> = <expression>` /// that has to be extracted from the match arm. pub(super) type MatchVarDecl = (Ident, ty::TyExpression); /// A leaf of a match pattern can be either a requirement on the scrutinee or a /// variable declaration but not both at the same time. /// In the case of the catch-all `_` we will have neither a requirement nor /// a variable declaration. #[allow(clippy::large_enum_variant)] pub(super) enum ReqOrVarDecl { /// Neither a requirement, nor a variable declaration. /// Means a catch-all pattern. Neither, Req(MatchReq), VarDecl(MatchVarDecl), } /// A tree structure that describes: /// - the overall requirement that needs to be satisfied in order for the match arm to match /// - all variable declarations within the match arm /// /// The tree represents a logical expression that consists of equality comparisons, and /// lazy AND and OR operators. /// /// The leaves of the tree are either equality comparisons or eventual variable declarations /// or none of those in the case of catch-all `_` pattern or only a single rest `..` in structs. pub(super) struct ReqDeclTree { root: ReqDeclNode, } impl ReqDeclTree { /// Creates a new tree that contains only one leaf node with the /// [MatchReq] of the form `<lhs> == <rhs>`. 
fn req(req: MatchReq) -> Self { Self { root: ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::Req(req)), } } /// Creates a new tree that contains only the leaf node with the /// [MatchVarDecl] `decl`. fn decl(decl: MatchVarDecl) -> Self { Self { root: ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::VarDecl(decl)), } } /// Creates a new tree that contains only the leaf node with /// neither a requirement nor a variable declaration. fn none() -> Self { Self { root: ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::Neither), } } /// Creates a new tree that contains only an AND node /// made of `nodes`. fn and(nodes: Vec<ReqDeclNode>) -> Self { Self { root: ReqDeclNode::And(nodes), } } /// Creates a new tree that contains only an OR node /// made of `nodes`. fn or(nodes: Vec<ReqDeclNode>) -> Self { Self { root: ReqDeclNode::Or(nodes), } } pub fn root(&self) -> &ReqDeclNode { &self.root } } /// A single node in the [ReqDeclTree]. #[allow(clippy::large_enum_variant)] pub(super) enum ReqDeclNode { /// The leaf node. Contains the information about a single requirement or /// variable declaration. /// E.g., a catch all `_` will have neither a requirement nor a variable declaration. /// E.g., a match arm variable `x` cannot have a requirement (it acts as catch all) /// but it will have the declaration of the variable `x`. /// E.g., a literal `123` will have a requirement on the scrutinee e.g. `struct.x == 123`. ReqOrVarDecl(ReqOrVarDecl), /// Represent the requirements and declarations connected with the lazy AND operator, /// if there are more than two of them. /// Notice that the vector of contained nodes can be empty or have only one element. /// AND semantics is applied if there are two or more elements. /// E.g., requirements coming from the struct and tuple patterns /// must all be fulfilled in order for the whole pattern to match. And(Vec<ReqDeclNode>), /// Represent the requirements and declarations connected with the lazy OR operator, /// if there are more than two of them. 
/// Notice that the vector of contained nodes can be empty or have only one element. /// OR semantics is applied if there are two or more elements. /// Only the requirements coming from the individual variants of an OR match arm /// will be connected with the OR operator. Or(Vec<ReqDeclNode>), } impl ReqDeclNode { /// Creates a new leaf node with the [MatchReq] of the form `<lhs> == <rhs>`. fn req(req: MatchReq) -> Self { ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::Req(req)) } /// Creates a new leaf node with the [MatchVarDecl] `decl`. fn decl(decl: MatchVarDecl) -> Self { ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::VarDecl(decl)) } } /// The [matcher] returns the [ReqDeclTree] for the given `scrutinee` that tries /// to match the given expression `exp`. /// /// Given the following example: /// /// ```ignore /// struct Point { /// x: u64, /// y: u64 /// } /// /// let p = Point { /// x: 42, /// y: 24 /// }; /// /// match p { /// Point { x, y: 5 } => { x }, // 1. /// Point { x, y: 5 } | Point { x, y: 10 } => { x }, // 2. /// Point { x: 10, y: 24 } => { 1 }, // 3. /// Point { x: 22, .. } => { 2 }, // 4. /// Point { .. } => { 3 }, // 5. /// _ => 0 // 6. /// } /// ``` /// /// the returned [ReqDeclTree] for each match arm will have the following form /// (square brackets represent each individual leaf node [ReqDeclNode::ReqOrVarDecl]): /// /// ```ignore /// 1. /// && /// / \ /// [let x = p.x] [p.y == 5] /// /// 2. /// || /// ___________/ \____________ /// && && /// / \ / \ /// [let x = p.x] [p.y == 5] [let x = p.x] [p.y == 10] /// /// 3. /// && /// / \ /// [p.x == 10] [p.y == 24] /// /// 4. /// && // Note that this AND node has only one childe node. /// | /// [p.x == 22] /// /// 5. /// && // Note that this AND node has only one childe node. /// | /// [None] /// /// 6. 
/// [None] /// ``` pub(super) fn matcher( handler: &Handler, ctx: TypeCheckContext, match_value: &ty::TyExpression, exp: &ty::TyExpression, scrutinee: ty::TyScrutinee, ) -> Result<ReqDeclTree, ErrorEmitted> { let ty::TyScrutinee { variant, type_id, span, } = scrutinee; let type_engine = ctx.engines.te(); // unify the type of the scrutinee with the type of the expression handler.scope(|h| { type_engine.unify(h, ctx.engines, type_id, exp.return_type, &span, "", || None); Ok(()) })?; match variant { ty::TyScrutineeVariant::Or(alternatives) => { match_or(handler, ctx, match_value, exp, alternatives) } ty::TyScrutineeVariant::CatchAll => Ok(ReqDeclTree::none()), ty::TyScrutineeVariant::Literal(value) => Ok(match_literal(exp, value, span)), ty::TyScrutineeVariant::Variable(name) => Ok(match_variable(exp, name)), ty::TyScrutineeVariant::Constant(_, _, const_decl) => { Ok(match_constant(ctx, exp, const_decl, span)) } ty::TyScrutineeVariant::StructScrutinee { struct_ref: _, fields, .. } => match_struct(handler, ctx, match_value, exp, fields), ty::TyScrutineeVariant::EnumScrutinee { variant, call_path_decl, value, .. } => match_enum( handler, ctx, match_value, exp, *variant, call_path_decl, *value, span, ), ty::TyScrutineeVariant::Tuple(elems) => { match_tuple(handler, ctx, match_value, exp, elems, span) } } } fn match_or( handler: &Handler, mut ctx: TypeCheckContext, match_value: &ty::TyExpression, exp: &ty::TyExpression, alternatives: Vec<ty::TyScrutinee>, ) -> Result<ReqDeclTree, ErrorEmitted> { return handler.scope(|handler| { let mut nodes = vec![]; let mut variables_in_alternatives: Vec<(Span, Vec<(Ident, TypeId)>)> = vec![]; // Span is the span of the alternative. for alternative in alternatives { let alternative_span = alternative.span.clone(); // We want to collect as many errors as possible. // If an alternative has any internal issues we will emit them, ignore that alternative, // but still process the remaining alternatives. 
let alternative_req_decl_tree = match matcher(handler, ctx.by_ref(), match_value, exp, alternative) { Ok(req_decl_tree) => req_decl_tree, Err(_) => continue, }; variables_in_alternatives.push(( alternative_span, variable_declarations(&alternative_req_decl_tree), )); nodes.push(alternative_req_decl_tree.root); } // All the first occurrences of variables in order of appearance. let mut variables: IndexMap<&Ident, TypeId> = IndexMap::new(); for (ident, type_id) in variables_in_alternatives.iter().flat_map(|(_, vars)| vars) { variables.entry(ident).or_insert(*type_id); } // At this stage, in the matcher, we are not concerned about the duplicates // in individual alternatives. // Check that we have all variables in all alternatives. for (variable, _) in variables.iter() { let missing_in_alternatives: Vec<Span> = variables_in_alternatives .iter() .filter_map(|(span, vars)| { (!vars.iter().any(|(ident, _)| ident == *variable)).then_some(span.clone()) }) .collect(); if missing_in_alternatives.is_empty() { continue; } handler.emit_err(CompileError::MatchArmVariableNotDefinedInAllAlternatives { match_value: match_value.span.clone(), match_type: ctx.engines.help_out(match_value.return_type).to_string(), variable: (*variable).clone(), missing_in_alternatives, }); } // Check that the variable types are the same in all alternatives // (assuming that the variable exist in the alternative). // To the equality, we accept type aliases and the types they encapsulate // to be equal, otherwise, we are strict, e.g., no coercion between u8 and u16, etc. 
let equality = UnifyCheck::non_dynamic_equality(ctx.engines); for (variable, type_id) in variables { let type_mismatched_vars = variables_in_alternatives.iter().flat_map(|(_, vars)| { vars.iter().filter_map(|(ident, var_type_id)| { (ident == variable && !equality.check(type_id, *var_type_id)) .then_some((ident.clone(), *var_type_id)) }) }); for type_mismatched_var in type_mismatched_vars { handler.emit_err(CompileError::MatchArmVariableMismatchedType { match_value: match_value.span.clone(), match_type: ctx.engines.help_out(match_value.return_type).to_string(), variable: type_mismatched_var.0, first_definition: variable.span(), expected: ctx.engines.help_out(type_id).to_string(), received: ctx.engines.help_out(type_mismatched_var.1).to_string(), }); } } Ok(ReqDeclTree::or(nodes)) }); /// Returns all [MatchVarDecl]s found in the match arm /// in order of their appearance from left to right. fn variable_declarations(req_decl_tree: &ReqDeclTree) -> Vec<(Ident, TypeId)> { let mut result = vec![]; collect_variable_declarations(&req_decl_tree.root, &mut result); return result; fn collect_variable_declarations( node: &ReqDeclNode, declarations: &mut Vec<(Ident, TypeId)>, ) { // Traverse the tree depth-first, left to right. 
            match node {
                ReqDeclNode::ReqOrVarDecl(ReqOrVarDecl::VarDecl((ident, exp))) => {
                    declarations.push((ident.clone(), exp.return_type));
                }
                ReqDeclNode::ReqOrVarDecl(_) => (),
                ReqDeclNode::And(nodes) | ReqDeclNode::Or(nodes) => {
                    // Recurse into both AND and OR nodes; declarations are
                    // collected from every child node.
                    for node in nodes {
                        collect_variable_declarations(node, declarations);
                    }
                }
            }
        }
    }
}

/// Matches `exp` against a literal pattern: produces a [ReqDeclTree] with the
/// single requirement `exp == <scrutinee literal>` and no variable declarations.
fn match_literal(exp: &ty::TyExpression, scrutinee: Literal, span: Span) -> ReqDeclTree {
    let req = (
        exp.to_owned(),
        ty::TyExpression {
            expression: ty::TyExpressionVariant::Literal(scrutinee),
            // The literal is given the same type as the matched expression.
            return_type: exp.return_type,
            span,
        },
    );

    ReqDeclTree::req(req)
}

/// Matches `exp` against a variable pattern. A variable pattern always
/// matches, so the resulting tree has no requirements — only the declaration
/// of `scrutinee_name` initialized to `exp`.
fn match_variable(exp: &ty::TyExpression, scrutinee_name: Ident) -> ReqDeclTree {
    let decl = (scrutinee_name, exp.to_owned());

    ReqDeclTree::decl(decl)
}

/// Matches `exp` against a constant pattern: produces a [ReqDeclTree] with the
/// single requirement `exp == <constant>`, where the constant is referenced
/// via its full call path, and no variable declarations.
fn match_constant(
    ctx: TypeCheckContext,
    exp: &ty::TyExpression,
    const_decl: TyConstantDecl,
    span: Span,
) -> ReqDeclTree {
    let name = const_decl.name().clone();
    let return_type = const_decl.type_ascription.type_id;
    let req = (
        exp.to_owned(),
        ty::TyExpression {
            expression: ty::TyExpressionVariant::ConstantExpression {
                span: span.clone(),
                decl: Box::new(const_decl),
                call_path: Some(CallPath::from(name).to_fullpath(ctx.engines(), ctx.namespace())),
            },
            return_type,
            span,
        },
    );

    ReqDeclTree::req(req)
}

/// Matches `exp` against a struct pattern: builds an AND-tree whose children
/// come from matching each scrutinee field. A field without a sub-pattern
/// declares a variable named after the field; a field with a sub-pattern
/// recurses into [matcher] on the field access expression.
fn match_struct(
    handler: &Handler,
    mut ctx: TypeCheckContext,
    match_value: &ty::TyExpression,
    exp: &ty::TyExpression,
    fields: Vec<ty::TyStructScrutineeField>,
) -> Result<ReqDeclTree, ErrorEmitted> {
    let mut nodes = vec![];

    for ty::TyStructScrutineeField {
        field,
        scrutinee,
        span: field_span,
        field_def_name: _,
    } in fields.into_iter()
    {
        // Get the expression that access the struct field e.g., `my_struct.x`.
        let subfield = instantiate_struct_field_access(
            handler,
            ctx.engines(),
            ctx.namespace(),
            exp.clone(),
            field.clone(),
            field_span,
        )?;

        match scrutinee {
            // If there is no scrutinee, we simply have the struct field name.
            // This means declaring a variable with the same name as the struct field,
            // initialized to the values of the subfield expression.
            None => {
                nodes.push(ReqDeclNode::decl((field, subfield)));
            }
            // If the scrutinee exist, we have the form `<field>: <match_sub_pattern>`.
            // We need to match the subfield against the sub pattern.
            Some(match_sub_pattern) => {
                let req_decl_tree = matcher(
                    handler,
                    ctx.by_ref(),
                    match_value,
                    &subfield,
                    match_sub_pattern,
                )?;
                nodes.push(req_decl_tree.root);
            }
        }
    }

    Ok(ReqDeclTree::and(nodes))
}

/// Matches `exp` against an enum-variant pattern: builds an AND-tree whose
/// first child is the tag requirement `EnumTag(exp) == variant.tag`, followed
/// by the tree obtained by matching the variant's underlying value (via an
/// unsafe downcast of `exp`) against `enum_value_scrutinee`.
#[allow(clippy::too_many_arguments)]
fn match_enum(
    handler: &Handler,
    ctx: TypeCheckContext,
    match_value: &ty::TyExpression,
    exp: &ty::TyExpression,
    variant: ty::TyEnumVariant,
    call_path_decl: ty::TyDecl,
    enum_value_scrutinee: ty::TyScrutinee,
    span: Span,
) -> Result<ReqDeclTree, ErrorEmitted> {
    let type_engine = ctx.engines.te();

    let mut nodes = vec![];

    // The first requirement is that the enum variant behind the `exp` is
    // of the kind `variant`. `exp is variant` is expressed as `EnumTag(<exp>) == <variant.tag>`.
    let enum_variant_req = (
        ty::TyExpression {
            expression: ty::TyExpressionVariant::EnumTag {
                exp: Box::new(exp.clone()),
            },
            return_type: type_engine.id_of_u64(),
            span: exp.span.clone(),
        },
        ty::TyExpression {
            expression: ty::TyExpressionVariant::Literal(Literal::U64(variant.tag as u64)),
            return_type: type_engine.id_of_u64(),
            span: exp.span.clone(),
        },
    );

    nodes.push(ReqDeclNode::req(enum_variant_req));

    // Afterwards, we need to collect the requirements for the enum variant underlying value.
    // If the enum variant does not have a value the `enum_value_scrutinee` will be of the
    // scrutinee variant `CatchAll` that will produce a ReqDeclTree without further requirements
    // or variable declarations.
    let unsafe_downcast = instantiate_enum_unsafe_downcast(exp, variant, call_path_decl, span);
    let req_decl_tree = matcher(
        handler,
        ctx,
        match_value,
        &unsafe_downcast,
        enum_value_scrutinee,
    )?;

    nodes.push(req_decl_tree.root);

    Ok(ReqDeclTree::and(nodes))
}

/// Matches `exp` against a tuple pattern: builds an AND-tree whose children
/// come from matching each tuple element, accessed via `<exp>.<index>`,
/// against the corresponding element scrutinee.
fn match_tuple(
    handler: &Handler,
    mut ctx: TypeCheckContext,
    match_value: &ty::TyExpression,
    exp: &ty::TyExpression,
    elems: Vec<ty::TyScrutinee>,
    span: Span,
) -> Result<ReqDeclTree, ErrorEmitted> {
    let mut nodes = vec![];

    for (pos, elem) in elems.into_iter().enumerate() {
        let tuple_index_access = instantiate_tuple_index_access(
            handler,
            ctx.engines(),
            exp.clone(),
            pos,
            span.clone(),
            span.clone(),
        )?;

        let req_decl_tree = matcher(
            handler,
            ctx.by_ref(),
            match_value,
            &tuple_index_access,
            elem,
        )?;

        nodes.push(req_decl_tree.root);
    }

    Ok(ReqDeclTree::and(nodes))
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/instantiate.rs
sway-core/src/semantic_analysis/ast_node/expression/match_expression/typed/instantiate.rs
use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::{Ident, Span}; use crate::{ language::{ty, LazyOp, Literal}, semantic_analysis::{ typed_expression::{instantiate_lazy_operator, instantiate_tuple_index_access}, TypeCheckContext, }, Engines, TypeId, }; /// Simplifies instantiation of desugared code in the match expression and match arms. pub(super) struct Instantiate { /// Both dummy span for instantiation of desugared elements /// and error span for internal compiler errors. span: Span, u64_type: TypeId, boolean_type: TypeId, revert_type: TypeId, } impl Instantiate { pub(super) fn new(engines: &Engines, span: Span) -> Self { let type_engine = engines.te(); Self { span, u64_type: type_engine.id_of_u64(), boolean_type: type_engine.id_of_bool(), revert_type: type_engine.id_of_never(), } } pub(super) fn dummy_span(&self) -> Span { self.span.clone() } pub(super) fn error_span(&self) -> Span { self.span.clone() } pub(super) fn u64_type(&self) -> TypeId { self.u64_type } /// Instantiates a [ty::TyDecl::VariableDecl] for an immutable variable of the form `let <name> = <body>;`. pub(super) fn var_decl(&self, name: Ident, body: ty::TyExpression) -> ty::TyDecl { let return_type = body.return_type; let type_ascription = body.return_type.into(); ty::TyDecl::VariableDecl(Box::new(ty::TyVariableDecl { name, body, mutability: ty::VariableMutability::Immutable, return_type, type_ascription, })) } /// Instantiates a [ty::TyExpressionVariant::VariableExpression] for accessing an immutable variable /// `name` of the type `type_id`. pub(super) fn var_exp(&self, name: Ident, type_id: TypeId) -> ty::TyExpression { ty::TyExpression { expression: ty::TyExpressionVariant::VariableExpression { name, span: self.dummy_span(), mutability: ty::VariableMutability::Immutable, call_path: None, }, return_type: type_id, span: self.dummy_span(), } } /// Instantiates a [ty::TyExpressionVariant::Literal] that represents a `u64` `value`. 
pub(super) fn u64_literal(&self, value: u64) -> ty::TyExpression { ty::TyExpression { expression: ty::TyExpressionVariant::Literal(Literal::U64(value)), return_type: self.u64_type, span: self.dummy_span(), } } /// Instantiates a [ty::TyExpressionVariant::Literal] that represents a `boolean` `value`. pub(super) fn boolean_literal(&self, value: bool) -> ty::TyExpression { ty::TyExpression { expression: ty::TyExpressionVariant::Literal(Literal::Boolean(value)), return_type: self.boolean_type, span: self.dummy_span(), } } /// Instantiates an [Ident] with overridden `name`. pub(super) fn ident(&self, name: String) -> Ident { Ident::new_with_override(name, self.dummy_span()) } /// Instantiates a [ty::TyExpressionVariant::CodeBlock] with a single /// [ty::TyAstNodeContent::ImplicitReturnExpression] that returns the `value`. pub(super) fn code_block_with_implicit_return_u64(&self, value: u64) -> ty::TyExpression { let ret_expr = ty::TyExpression { expression: ty::TyExpressionVariant::Literal(Literal::U64(value)), return_type: self.u64_type, span: self.dummy_span(), }; ty::TyExpression { expression: ty::TyExpressionVariant::CodeBlock(ty::TyCodeBlock { whole_block_span: self.dummy_span(), contents: vec![ty::TyAstNode { content: ty::TyAstNodeContent::Expression(ty::TyExpression { return_type: ret_expr.return_type, span: ret_expr.span.clone(), expression: ty::TyExpressionVariant::ImplicitReturn(Box::new(ret_expr)), }), span: self.dummy_span(), }], }), return_type: self.u64_type, span: self.dummy_span(), } } /// Instantiates a [ty::TyExpressionVariant::CodeBlock] with a single /// [ty::TyAstNodeContent::ImplicitReturnExpression] that returns calls `__revert(revert_code)`. 
pub(super) fn code_block_with_implicit_return_revert( &self, revert_code: u64, ) -> ty::TyExpression { let ret_expr = ty::TyExpression { expression: ty::TyExpressionVariant::IntrinsicFunction(ty::TyIntrinsicFunctionKind { kind: sway_ast::Intrinsic::Revert, arguments: vec![ty::TyExpression { expression: ty::TyExpressionVariant::Literal(Literal::U64(revert_code)), return_type: self.u64_type, span: self.dummy_span(), }], type_arguments: vec![], span: self.dummy_span(), }), return_type: self.revert_type, span: self.dummy_span(), }; ty::TyExpression { expression: ty::TyExpressionVariant::CodeBlock(ty::TyCodeBlock { whole_block_span: self.dummy_span(), contents: vec![ty::TyAstNode { content: ty::TyAstNodeContent::Expression(ty::TyExpression { return_type: ret_expr.return_type, span: ret_expr.span.clone(), expression: ty::TyExpressionVariant::ImplicitReturn(Box::new(ret_expr)), }), span: self.dummy_span(), }], }), return_type: self.revert_type, span: self.dummy_span(), } } /// Instantiates an expression equivalent to `<lhs> == <rhs>`. pub(super) fn eq_result( &self, handler: &Handler, ctx: TypeCheckContext, lhs: ty::TyExpression, rhs: ty::TyExpression, ) -> Result<ty::TyExpression, ErrorEmitted> { ty::TyExpression::std_ops_eq(handler, ctx, vec![lhs, rhs], self.dummy_span()) } /// Instantiates an expression equivalent to `<lhs> != <rhs>`. pub(super) fn neq_result( &self, handler: &Handler, ctx: TypeCheckContext, lhs: ty::TyExpression, rhs: ty::TyExpression, ) -> Result<ty::TyExpression, ErrorEmitted> { ty::TyExpression::std_ops_neq(handler, ctx, vec![lhs, rhs], self.dummy_span()) } /// Instantiates an expression equivalent to `<lhs> == <rhs>`. The method expects that /// the expression can be instantiated and panics if that's not the case. 
pub(super) fn eq( &self, ctx: TypeCheckContext, lhs: ty::TyExpression, rhs: ty::TyExpression, ) -> ty::TyExpression { ty::TyExpression::std_ops_eq(&Handler::default(), ctx, vec![lhs, rhs], self.dummy_span()) .expect("Instantiating `std::ops::eq` is expected to always work.") } /// Instantiates a [ty::TyExpressionVariant::TupleElemAccess] `<tuple_variable>.<index>`. The method expects that /// the expression can be instantiated and panics if that's not the case. pub(super) fn tuple_elem_access( &self, engines: &Engines, tuple_variable: ty::TyExpression, index: usize, ) -> ty::TyExpression { instantiate_tuple_index_access( &Handler::default(), engines, tuple_variable, index, self.dummy_span(), self.dummy_span(), ) .expect("Instantiating tuple element access expression is expected to always work.") } /// Instantiates a [LazyOp::And] expression of the form `<lhs> && <rhs>`. pub(super) fn lazy_and( &self, lhs: ty::TyExpression, rhs: ty::TyExpression, ) -> ty::TyExpression { instantiate_lazy_operator(LazyOp::And, lhs, rhs, self.boolean_type, self.dummy_span()) } /// Instantiates a [LazyOp::Or] expression of the form `<lhs> || <rhs>`. pub(super) fn lazy_or(&self, lhs: ty::TyExpression, rhs: ty::TyExpression) -> ty::TyExpression { instantiate_lazy_operator(LazyOp::Or, lhs, rhs, self.boolean_type, self.dummy_span()) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/asm_builder.rs
sway-core/src/asm_generation/asm_builder.rs
use super::FinalizedAsm;
use crate::{asm_lang::Label, BuildConfig};
use sway_error::handler::{ErrorEmitted, Handler};
use sway_ir::{ConfigContent, Function};

/// Common interface implemented by the target-specific ASM builders
/// (Fuel and EVM); drives lowering of an IR module into [FinalizedAsm].
pub trait AsmBuilder {
    /// Returns labels for `func`. The first label is the function's entry
    /// point (callers use it to reference the function before it is
    /// compiled); the second is presumably the function's end — TODO confirm
    /// against the Fuel builder implementation.
    fn func_to_labels(&mut self, func: &Function) -> (Label, Label);
    /// Emits whatever the target needs for a single configurable constant.
    fn compile_configurable(&mut self, config: &ConfigContent);
    /// Compiles one IR function into the builder's instruction stream,
    /// reporting errors through `handler`.
    fn compile_function(
        &mut self,
        handler: &Handler,
        function: Function,
    ) -> Result<(), ErrorEmitted>;
    /// Consumes the builder and produces the finalized ASM.
    /// `fallback_fn` is the entry label of the contract's fallback function,
    /// if one exists.
    fn finalize(
        self,
        handler: &Handler,
        build_config: Option<&BuildConfig>,
        fallback_fn: Option<Label>,
    ) -> Result<FinalizedAsm, ErrorEmitted>;
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/abi.rs
sway-core/src/asm_generation/abi.rs
use super::EvmAbiResult;

/// The ABI (application binary interface) of a compiled program, carried in
/// the representation of whichever backend produced it.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug)]
pub enum ProgramABI {
    /// ABI in the Fuel JSON-ABI program format.
    Fuel(fuel_abi_types::abi::program::ProgramABI),
    /// ABI in the EVM format.
    Evm(EvmAbiResult),
    /// The MidenVM target currently carries no ABI payload.
    MidenVM(()),
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/finalized_asm.rs
sway-core/src/asm_generation/finalized_asm.rs
use super::instruction_set::InstructionSet; use super::{ fuel::{checks, data_section::DataSection}, ProgramABI, ProgramKind, }; use crate::asm_generation::fuel::data_section::{Datum, Entry, EntryName}; use crate::asm_lang::allocated_ops::{AllocatedInstruction, AllocatedOp, FuelAsmData}; use crate::decl_engine::DeclRefFunction; use crate::source_map::SourceMap; use crate::BuildConfig; use etk_asm::asm::Assembler; use fuel_vm::fuel_asm::{Imm06, Imm12, Imm18, Imm24, Instruction, RegId}; use sway_error::error::CompileError; use sway_error::handler::{ErrorEmitted, Handler}; use sway_types::span::Span; use sway_types::SourceEngine; use std::{collections::BTreeMap, fmt}; /// Represents an ASM set which has had register allocation, jump elimination, and optimization /// applied to it #[derive(Clone, serde::Serialize)] pub struct AsmInformation { pub bytecode_size: u64, pub data_section: DataSectionInformation, } #[derive(Default, Clone, Debug, serde::Serialize)] pub struct DataSectionInformation { /// The total size of the data section in bytes pub size: u64, /// The used size of the data section in bytes pub used: u64, /// The data to be put in the data section of the asm pub value_pairs: Vec<Entry>, } /// Represents an ASM set which has had register allocation, jump elimination, and optimization /// applied to it #[derive(Clone)] pub struct FinalizedAsm { pub data_section: DataSection, pub program_section: InstructionSet, pub program_kind: ProgramKind, pub entries: Vec<FinalizedEntry>, pub abi: Option<ProgramABI>, } #[derive(Clone, Debug)] pub struct FinalizedEntry { /// The original entry point function name. pub fn_name: String, /// The immediate instruction offset at which the entry function begins. pub imm: u64, /// The function selector (only `Some` for contract ABI methods). pub selector: Option<[u8; 4]>, /// If this entry is constructed from a test function contains the declaration id for that /// function, otherwise contains `None`. 
pub test_decl_ref: Option<DeclRefFunction>, } /// The bytecode for a sway program as well as the byte offsets of configuration-time constants in /// the bytecode. pub struct CompiledBytecode { pub bytecode: Vec<u8>, pub named_data_section_entries_offsets: BTreeMap<String, u64>, } impl FinalizedAsm { pub(crate) fn to_bytecode_mut( &mut self, handler: &Handler, source_map: &mut SourceMap, source_engine: &SourceEngine, build_config: &BuildConfig, ) -> Result<CompiledBytecode, ErrorEmitted> { match &self.program_section { InstructionSet::Fuel { ops } => Ok(to_bytecode_mut( ops, &mut self.data_section, source_map, source_engine, build_config, )), InstructionSet::Evm { ops } => { let mut assembler = Assembler::new(); if let Err(e) = assembler.push_all(ops.clone()) { Err(handler.emit_err(CompileError::InternalOwned(e.to_string(), Span::dummy()))) } else { Ok(CompiledBytecode { bytecode: assembler.take(), named_data_section_entries_offsets: BTreeMap::new(), }) } } } } } impl FinalizedEntry { /// We assume the entry point is for a test function in the case it is neither an ABI method /// (no selector) or it is not "main". 
pub fn is_test(&self) -> bool { self.selector.is_none() && self.fn_name != sway_types::constants::DEFAULT_ENTRY_POINT_FN_NAME } } impl fmt::Display for FinalizedAsm { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}\n{}", self.program_section, self.data_section) } } fn to_bytecode_mut( ops: &[AllocatedOp], data_section: &mut DataSection, source_map: &mut SourceMap, source_engine: &SourceEngine, build_config: &BuildConfig, ) -> CompiledBytecode { fn op_size_in_bytes(data_section: &DataSection, item: &AllocatedOp) -> u64 { match &item.opcode { AllocatedInstruction::LoadDataId(_reg, data_label) if !data_section .has_copy_type(data_label) .expect("data label references non existent data -- internal error") => { 8 } AllocatedInstruction::AddrDataId(_, _data_id) => 8, AllocatedInstruction::ConfigurablesOffsetPlaceholder => 8, AllocatedInstruction::DataSectionOffsetPlaceholder => 8, AllocatedInstruction::BLOB(count) => count.value() as u64 * 4, AllocatedInstruction::CFEI(i) | AllocatedInstruction::CFSI(i) if i.value() == 0 => 0, _ => 4, } } // Some instructions may be omitted or expanded into multiple instructions, so we compute, // using `op_size_in_bytes`, exactly how many ops will be generated to calculate the offset. let mut offset_to_data_section_in_bytes = ops .iter() .fold(0, |acc, item| acc + op_size_in_bytes(data_section, item)); // A noop is inserted in ASM generation if required, to word-align the data section. 
let mut ops_padded = Vec::new(); let ops = if offset_to_data_section_in_bytes & 7 == 0 { ops } else { ops_padded.reserve(ops.len() + 1); ops_padded.extend(ops.iter().cloned()); ops_padded.push(AllocatedOp { opcode: AllocatedInstruction::NOOP, comment: "word-alignment of data section".into(), owning_span: None, }); offset_to_data_section_in_bytes += 4; &ops_padded }; let mut offset_from_instr_start = 0; for op in ops.iter() { match &op.opcode { AllocatedInstruction::LoadDataId(_reg, data_label) if !data_section .has_copy_type(data_label) .expect("data label references non existent data -- internal error") => { // For non-copy type loads, pre-insert pointers into the data_section so that // from this point on, the data_section remains immutable. This is necessary // so that when we take addresses of configurables, that address doesn't change // later on if a non-configurable is added to the data-section. let offset_bytes = data_section.data_id_to_offset(data_label) as u64; // The -4 is because $pc is added in the *next* instruction. 
let pointer_offset_from_current_instr = offset_to_data_section_in_bytes - offset_from_instr_start + offset_bytes - 4; data_section.append_pointer(pointer_offset_from_current_instr); } _ => (), } offset_from_instr_start += op_size_in_bytes(data_section, op); } let mut bytecode = Vec::with_capacity(offset_to_data_section_in_bytes as usize); if build_config.print_bytecode { println!(";; --- START OF TARGET BYTECODE ---\n"); } let mut last_span = None; let mut indentation = if build_config.print_bytecode_spans { 4 } else { 0 }; let mut half_word_ix = 0; let mut offset_from_instr_start = 0; for op in ops.iter() { let span = op.owning_span.clone(); let fuel_op = op.to_fuel_asm( offset_to_data_section_in_bytes, offset_from_instr_start, data_section, ); offset_from_instr_start += op_size_in_bytes(data_section, op); match fuel_op { FuelAsmData::DatasectionOffset(data) => { if build_config.print_bytecode { print!("{}{:#010x} ", " ".repeat(indentation), bytecode.len()); println!(" ;; {data:?}"); } // Static assert to ensure that we're only dealing with DataSectionOffsetPlaceholder, // a one-word (8 bytes) data within the code. No other uses are known. let _: [u8; 8] = data; bytecode.extend(data.iter().cloned()); half_word_ix += 2; } FuelAsmData::ConfigurablesOffset(data) => { if build_config.print_bytecode { print!("{}{:#010x} ", " ".repeat(indentation), bytecode.len()); println!(" ;; {data:?}"); } // Static assert to ensure that we're only dealing with ConfigurablesOffsetPlaceholder, // a 1-word (8 bytes) data within the code. No other uses are known. 
let _: [u8; 8] = data; bytecode.extend(data.iter().cloned()); half_word_ix += 2; } FuelAsmData::Instructions(instructions) => { for instruction in instructions { // Print original source span only once if build_config.print_bytecode_spans { last_span = match (last_span, &span) { (None, Some(span)) => { indentation = 4; let line_col = span.start_line_col_one_index(); println!( "{} @ {}:{}:{}", span.as_str(), span.source_id() .map(|source_id| source_engine.get_path(source_id)) .map(|x| x.display().to_string()) .unwrap_or("<autogenerated>".to_string()), line_col.line, line_col.col ); Some(span.clone()) } (Some(last), Some(span)) if last != *span => { indentation = 4; let line_col = span.start_line_col_one_index(); println!( "{} @ {}:{}:{}", span.as_str(), span.source_id() .map(|source_id| source_engine.get_path(source_id)) .map(|x| x.display().to_string()) .unwrap_or("<autogenerated>".to_string()), line_col.line, line_col.col ); Some(span.clone()) } (last, _) => last, }; } if build_config.print_bytecode { print!("{}{:#010x} ", " ".repeat(indentation), bytecode.len()); print_instruction(&instruction); } if let Some(span) = &span { source_map.insert(source_engine, half_word_ix, span); } let bytes = instruction.to_bytes(); if build_config.print_bytecode { println!(";; {bytes:?}") } bytecode.extend(bytes.iter()); half_word_ix += 1; } } } } if build_config.print_bytecode { println!(".data_section:"); let offset = bytecode.len(); fn print_entry(indentation: usize, offset: usize, pair: &Entry) { print!("{}{:#010x} ", " ".repeat(indentation), offset); match &pair.value { Datum::Byte(w) => println!(".byte i{w}, as hex {w:02X}"), Datum::Word(w) => { println!(".word i{w}, as hex be bytes ({:02X?})", w.to_be_bytes()) } Datum::ByteArray(bs) => { print!(".bytes as hex ({bs:02X?}), len i{}, as ascii \"", bs.len()); for b in bs { print!( "{}", if *b == b' ' || b.is_ascii_graphic() { *b as char } else { '.' 
} ); } println!("\""); } Datum::Slice(bs) => { print!(".slice as hex ({bs:02X?}), len i{}, as ascii \"", bs.len()); for b in bs { print!( "{}", if *b == b' ' || b.is_ascii_graphic() { *b as char } else { '.' } ); } println!("\""); } Datum::Collection(els) => { println!(".collection"); for e in els { print_entry(indentation + 1, offset, e); } } }; } for (i, entry) in data_section.iter_all_entries().enumerate() { let entry_offset = data_section.absolute_idx_to_offset(i); print_entry(indentation, offset + entry_offset, &entry); } println!(";; --- END OF TARGET BYTECODE ---\n"); } assert_eq!(half_word_ix * 4, offset_to_data_section_in_bytes as usize); assert_eq!(bytecode.len(), offset_to_data_section_in_bytes as usize); let num_nonconfigurables = data_section.non_configurables.len(); let named_data_section_entries_offsets = data_section .configurables .iter() .enumerate() .map(|(id, entry)| { let EntryName::Configurable(name) = &entry.name else { panic!("Non-configurable in configurables part of datasection"); }; ( name.clone(), offset_to_data_section_in_bytes + data_section.absolute_idx_to_offset(id + num_nonconfigurables) as u64, ) }) .collect::<BTreeMap<String, u64>>(); let mut data_section = data_section.serialize_to_bytes(); bytecode.append(&mut data_section); CompiledBytecode { bytecode, named_data_section_entries_offsets, } } // Code to pretty print bytecode fn print_reg(r: RegId) -> String { match r { RegId::BAL => "$bal".to_string(), RegId::CGAS => "$cgas".to_string(), RegId::ERR => "$err".to_string(), RegId::FLAG => "$flag".to_string(), RegId::FP => "$fp".to_string(), RegId::GGAS => "$ggas".to_string(), RegId::HP => "$hp".to_string(), RegId::IS => "$is".to_string(), RegId::OF => "$of".to_string(), RegId::ONE => "$one".to_string(), RegId::PC => "$pc".to_string(), RegId::RET => "$ret".to_string(), RegId::RETL => "$retl".to_string(), RegId::SP => "$sp".to_string(), RegId::SSP => "$ssp".to_string(), RegId::WRITABLE => "$writable".to_string(), RegId::ZERO => 
"$zero".to_string(), _ => format!("R{:?}", r.to_u8()), } } trait Args { fn print(&self) -> String; } impl Args for RegId { fn print(&self) -> String { print_reg(*self) } } impl Args for Imm06 { fn print(&self) -> String { format!("{:#x}", self.to_u8()) } } impl Args for Imm12 { fn print(&self) -> String { format!("{:#x}", self.to_u16()) } } impl Args for Imm18 { fn print(&self) -> String { format!("{:#x}", self.to_u32()) } } impl Args for Imm24 { fn print(&self) -> String { format!("{:#x}", self.to_u32()) } } impl Args for () { fn print(&self) -> String { String::new() } } impl<A: Args> Args for (A,) { fn print(&self) -> String { self.0.print() } } impl<A: Args, B: Args> Args for (A, B) { fn print(&self) -> String { format!("{} {}", self.0.print(), self.1.print()) } } impl<A: Args, B: Args, C: Args> Args for (A, B, C) { fn print(&self) -> String { format!("{} {} {}", self.0.print(), self.1.print(), self.2.print()) } } impl<A: Args, B: Args, C: Args, D: Args> Args for (A, B, C, D) { fn print(&self) -> String { format!( "{} {} {} {}", self.0.print(), self.1.print(), self.2.print(), self.3.print() ) } } fn f(name: &str, args: impl Args) { let mut line = format!("{name} {}", args.print()); let s = " ".repeat(48 - line.len()); line.push_str(&s); print!("{line}") } fn print_instruction(op: &Instruction) { match op { Instruction::ADD(x) => f("ADD", x.unpack()), Instruction::AND(x) => f("AND", x.unpack()), Instruction::DIV(x) => f("DIV", x.unpack()), Instruction::EQ(x) => f("EQ", x.unpack()), Instruction::EXP(x) => f("EXP", x.unpack()), Instruction::GT(x) => f("GT", x.unpack()), Instruction::LT(x) => f("LT", x.unpack()), Instruction::MLOG(x) => f("MLOG", x.unpack()), Instruction::MROO(x) => f("MROO", x.unpack()), Instruction::MOD(x) => f("MOD", x.unpack()), Instruction::MOVE(x) => f("MOVE", x.unpack()), Instruction::MUL(x) => f("MUL", x.unpack()), Instruction::NOT(x) => f("NOT", x.unpack()), Instruction::OR(x) => f("OR", x.unpack()), Instruction::SLL(x) => f("SLL", 
x.unpack()), Instruction::SRL(x) => f("SRL", x.unpack()), Instruction::SUB(x) => f("SUB", x.unpack()), Instruction::XOR(x) => f("XOR", x.unpack()), Instruction::MLDV(x) => f("MLDV", x.unpack()), Instruction::RET(x) => f("RET", x.unpack()), Instruction::RETD(x) => f("RETD", x.unpack()), Instruction::ALOC(x) => f("ALOC", x.unpack()), Instruction::MCL(x) => f("MCL", x.unpack()), Instruction::MCP(x) => f("MCP", x.unpack()), Instruction::MEQ(x) => f("MEQ", x.unpack()), Instruction::BHSH(x) => f("BHSH", x.unpack()), Instruction::BHEI(x) => f("BHEI", x.unpack()), Instruction::BURN(x) => f("BURN", x.unpack()), Instruction::CALL(x) => f("CALL", x.unpack()), Instruction::CCP(x) => f("CCP", x.unpack()), Instruction::CROO(x) => f("CROO", x.unpack()), Instruction::CSIZ(x) => f("CSIZ", x.unpack()), Instruction::CB(x) => f("CB", x.unpack()), Instruction::LDC(x) => f("LDC", x.unpack()), Instruction::LOG(x) => f("LOG", x.unpack()), Instruction::LOGD(x) => f("LOGD", x.unpack()), Instruction::MINT(x) => f("MINT", x.unpack()), Instruction::RVRT(x) => f("RVRT", x.unpack()), Instruction::SCWQ(x) => f("SCWQ", x.unpack()), Instruction::SRW(x) => f("SRW", x.unpack()), Instruction::SRWQ(x) => f("SRWQ", x.unpack()), Instruction::SWW(x) => f("SWW", x.unpack()), Instruction::SWWQ(x) => f("SWWQ", x.unpack()), Instruction::TR(x) => f("TR", x.unpack()), Instruction::TRO(x) => f("TRO", x.unpack()), Instruction::ECK1(x) => f("ECK1", x.unpack()), Instruction::ECR1(x) => f("ECR1", x.unpack()), Instruction::ED19(x) => f("ED19", x.unpack()), Instruction::K256(x) => f("K256", x.unpack()), Instruction::S256(x) => f("S256", x.unpack()), Instruction::TIME(x) => f("TIME", x.unpack()), Instruction::NIOP(x) => f("NIOP", x.unpack()), Instruction::NOOP(_) => f("NOOP", ()), Instruction::FLAG(x) => f("FLAG", x.unpack()), Instruction::BAL(x) => f("BAL", x.unpack()), Instruction::JAL(x) => f("JAL", x.unpack()), Instruction::JMP(x) => f("JMP", x.unpack()), Instruction::JNE(x) => f("JNE", x.unpack()), 
Instruction::SMO(x) => f("SMO", x.unpack()), Instruction::ADDI(x) => f("ADDI", x.unpack()), Instruction::ANDI(x) => f("ANDI", x.unpack()), Instruction::DIVI(x) => f("DIVI", x.unpack()), Instruction::EXPI(x) => f("EXPI", x.unpack()), Instruction::MODI(x) => f("MODI", x.unpack()), Instruction::MULI(x) => f("MULI", x.unpack()), Instruction::ORI(x) => f("ORI", x.unpack()), Instruction::SLLI(x) => f("SLLI", x.unpack()), Instruction::SRLI(x) => f("SRLI", x.unpack()), Instruction::SUBI(x) => f("SUBI", x.unpack()), Instruction::XORI(x) => f("XORI", x.unpack()), Instruction::JNEI(x) => f("JNEI", x.unpack()), Instruction::LB(x) => f("LB", x.unpack()), Instruction::LQW(x) => f("LQW", x.unpack()), Instruction::LHW(x) => f("LHW", x.unpack()), Instruction::LW(x) => f("LW", x.unpack()), Instruction::SB(x) => f("SB", x.unpack()), Instruction::SQW(x) => f("SQW", x.unpack()), Instruction::SHW(x) => f("SHW", x.unpack()), Instruction::SW(x) => f("SW", x.unpack()), Instruction::MCPI(x) => f("MCPI", x.unpack()), Instruction::GTF(x) => f("GTF", x.unpack()), Instruction::MCLI(x) => f("MCLI", x.unpack()), Instruction::GM(x) => f("GM", x.unpack()), Instruction::MOVI(x) => f("MOVI", x.unpack()), Instruction::JNZI(x) => f("JNZI", x.unpack()), Instruction::JMPF(x) => f("JMPF", x.unpack()), Instruction::JMPB(x) => f("JMPB", x.unpack()), Instruction::JNZF(x) => f("JNZF", x.unpack()), Instruction::JNZB(x) => f("JNZB", x.unpack()), Instruction::JNEF(x) => f("JNEF", x.unpack()), Instruction::JNEB(x) => f("JNEB", x.unpack()), Instruction::JI(x) => f("JI", x.unpack()), Instruction::CFEI(x) => f("CFEI", x.unpack()), Instruction::CFSI(x) => f("CFSI", x.unpack()), Instruction::CFE(x) => f("CFE", x.unpack()), Instruction::CFS(x) => f("CFS", x.unpack()), Instruction::PSHL(x) => f("PSHL", x.unpack()), Instruction::PSHH(x) => f("PSHH", x.unpack()), Instruction::POPL(x) => f("POPL", x.unpack()), Instruction::POPH(x) => f("POPH", x.unpack()), Instruction::WDCM(x) => f("WDCM", x.unpack()), Instruction::WQCM(x) 
=> f("WQCM", x.unpack()), Instruction::WDOP(x) => f("WDOP", x.unpack()), Instruction::WQOP(x) => f("WQOP", x.unpack()), Instruction::WDML(x) => f("WDML", x.unpack()), Instruction::WQML(x) => f("WQML", x.unpack()), Instruction::WDDV(x) => f("WDDV", x.unpack()), Instruction::WQDV(x) => f("WQDV", x.unpack()), Instruction::WDMD(x) => f("WDMD", x.unpack()), Instruction::WQMD(x) => f("WQMD", x.unpack()), Instruction::WDAM(x) => f("WDAM", x.unpack()), Instruction::WQAM(x) => f("WQAM", x.unpack()), Instruction::WDMM(x) => f("WDMM", x.unpack()), Instruction::WQMM(x) => f("WQMM", x.unpack()), Instruction::ECAL(x) => f("ECAL", x.unpack()), Instruction::BSIZ(x) => f("BSIZ", x.unpack()), Instruction::BLDD(x) => f("BLDD", x.unpack()), Instruction::ECOP(x) => f("ECOP", x.unpack()), Instruction::EPAR(x) => f("EPAR", x.unpack()), } } /// Checks for disallowed opcodes in non-contract code. /// i.e., if this is a script or predicate, we can't use certain contract opcodes. /// See https://github.com/FuelLabs/sway/issues/350 for details. pub fn check_invalid_opcodes(handler: &Handler, asm: &FinalizedAsm) -> Result<(), ErrorEmitted> { match &asm.program_section { InstructionSet::Fuel { ops } => match asm.program_kind { ProgramKind::Contract | ProgramKind::Library => Ok(()), ProgramKind::Script => checks::check_script_opcodes(handler, &ops[..]), ProgramKind::Predicate => checks::check_predicate_opcodes(handler, &ops[..]), }, InstructionSet::Evm { ops: _ } => Ok(()), } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/instruction_set.rs
sway-core/src/asm_generation/instruction_set.rs
use crate::asm_lang::allocated_ops::AllocatedOp; use std::fmt; /// An [InstructionSet] is produced by allocating registers on an [AbstractInstructionSet]. #[derive(Clone)] pub enum InstructionSet { Fuel { ops: Vec<AllocatedOp> }, Evm { ops: Vec<etk_asm::ops::AbstractOp> }, } impl fmt::Display for InstructionSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, ".program:\n{}", match self { InstructionSet::Fuel { ops } => ops .iter() .map(|x| format!("{x}")) .collect::<Vec<_>>() .join("\n"), InstructionSet::Evm { ops } => ops .iter() .map(|x| format!("{x}")) .collect::<Vec<_>>() .join("\n"), } ) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/from_ir.rs
sway-core/src/asm_generation/from_ir.rs
use super::{ asm_builder::AsmBuilder, evm::EvmAsmBuilder, finalized_asm::{check_invalid_opcodes, FinalizedAsm}, fuel::{ data_section::{DataId, DataSection}, fuel_asm_builder::FuelAsmBuilder, register_sequencer::RegisterSequencer, }, }; use crate::{asm_generation::ProgramKind, BuildConfig, BuildTarget}; use crate::asm_lang::VirtualImmediate18; use sway_error::handler::{ErrorEmitted, Handler}; use sway_ir::{Context, Kind, Module}; pub fn compile_ir_context_to_finalized_asm( handler: &Handler, ir: &Context, build_config: Option<&BuildConfig>, ) -> Result<FinalizedAsm, ErrorEmitted> { // Eventually when we get this 'correct' with no hacks we'll want to compile all the modules // separately and then use a linker to connect them. This way we could also keep binary caches // of libraries and link against them, rather than recompile everything each time. For now we // assume there is one module. assert!(ir.module_iter().count() == 1); let module = ir.module_iter().next().unwrap(); let reg_seqr = RegisterSequencer::new(); let kind = match module.get_kind(ir) { Kind::Contract => ProgramKind::Contract, Kind::Library => ProgramKind::Library, Kind::Predicate => ProgramKind::Predicate, Kind::Script => ProgramKind::Script, }; let build_target = match build_config { Some(cfg) => cfg.build_target, None => BuildTarget::default(), }; let finalized_asm = match build_target { BuildTarget::Fuel => compile( handler, ir, module, build_config, FuelAsmBuilder::new(kind, DataSection::default(), reg_seqr, ir), ), BuildTarget::EVM => compile( handler, ir, module, build_config, EvmAsmBuilder::new(kind, ir), ), }?; check_invalid_opcodes(handler, &finalized_asm)?; Ok(finalized_asm) } fn compile( handler: &Handler, context: &Context, module: Module, build_config: Option<&BuildConfig>, mut builder: impl AsmBuilder, ) -> Result<FinalizedAsm, ErrorEmitted> { let mut fallback_fn = None; // Pre-create labels for all functions before we generate other code, so we can call them // before compiling them 
if needed. for func in module.function_iter(context) { let (start, _) = builder.func_to_labels(&func); if func.is_fallback(context) { fallback_fn = Some(start); } } for config in module.iter_configs(context) { builder.compile_configurable(config); } for function in module.function_iter(context) { builder.compile_function(handler, function)?; } builder.finalize(handler, build_config, fallback_fn) } // ------------------------------------------------------------------------------------------------- // NOTE: For stack storage we need to be aware: // - sizes are in bytes; CFEI reserves in bytes. // - offsets are in 64-bit words; LW/SW reads/writes to word offsets. XXX Wrap in a WordOffset struct. #[derive(Clone, Debug)] pub(super) enum Storage { Data(DataId), // Const storage in the data section. Stack(u64), // Storage in the runtime stack starting at an absolute word offset. Essentially a global. Const(VirtualImmediate18), // An immediate value that can be moved to a register using MOVI. } pub enum StateAccessType { Read, Write, }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/mod.rs
sway-core/src/asm_generation/mod.rs
pub mod abi; pub use abi::*; pub mod asm_builder; pub mod evm; pub use evm::*; pub mod from_ir; pub mod fuel; pub mod instruction_set; mod finalized_asm; pub use finalized_asm::*; pub use fuel::data_section::{Datum, Entry}; #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum ProgramKind { Contract, Library, Predicate, Script, }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/evm/mod.rs
sway-core/src/asm_generation/evm/mod.rs
mod evm_asm_builder; pub use evm_asm_builder::*;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/evm/evm_asm_builder.rs
sway-core/src/asm_generation/evm/evm_asm_builder.rs
use crate::{ asm_generation::{ asm_builder::AsmBuilder, from_ir::StateAccessType, fuel::data_section::DataSection, instruction_set::InstructionSet, FinalizedAsm, ProgramABI, ProgramKind, }, asm_lang::Label, metadata::MetadataManager, }; use etk_asm::{asm::Assembler, ops::*}; use etk_ops::london::*; use std::collections::HashMap; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_ir::{Context, *}; use sway_types::Span; /// A smart contract is created by sending a transaction with an empty "to" field. /// When this is done, the Ethereum virtual machine (EVM) runs the bytecode which is /// set in the init byte array which is a field that can contain EVM bytecode /// /// The EVM bytecode that is then stored on the blockchain is the value that is /// returned by running the content of init on the EVM. /// /// The bytecode can refer to itself through the opcode CODECOPY opcode, which reads /// three values on the stack where two of those values are pointers to the bytecode, /// one marking the beginning and one marking the end of what should be copied to memory. /// /// The RETURN opcode is then used, along with the correct values placed on the stack, /// to return bytecode from the initial run of the EVM code. /// RETURN reads and removes two pointers from the stack. /// These pointers define the part of the memory that is a return value. /// The return value of the initial contract creating run of the bytecode defines /// the bytecode that is stored on the blockchain and associated with the address /// on which you have created the smart contract. /// /// The code that is compiled but not stored on the blockchain is thus the code needed /// to store the correct code on the blockchain but also any logic that is contained in /// a (potential) constructor of the contract. 
pub struct EvmAsmBuilder<'ir, 'eng> { #[allow(dead_code)] program_kind: ProgramKind, sections: Vec<EvmAsmSection>, // Label maps are from IR functions or blocks to label name. Functions have a start and end // label. pub(super) func_label_map: HashMap<Function, (Label, Label)>, #[allow(dead_code)] pub(super) block_label_map: HashMap<Block, Label>, // IR context we're compiling. context: &'ir Context<'eng>, // Metadata manager for converting metadata to Spans, etc. md_mgr: MetadataManager, // Monotonically increasing unique identifier for label generation. label_idx: usize, // In progress EVM asm section. pub(super) cur_section: Option<EvmAsmSection>, } #[derive(Default, Debug)] pub struct EvmAsmSection { ops: Vec<etk_asm::ops::AbstractOp>, abi: Vec<ethabi::operation::Operation>, } impl EvmAsmSection { pub fn new() -> Self { Self::default() } pub fn size(&self) -> usize { let mut asm = Assembler::new(); if asm.push_all(self.ops.clone()).is_err() { panic!("Could not size EVM assembly section"); } asm.take().len() } } pub struct EvmAsmBuilderResult { pub ops: Vec<etk_asm::ops::AbstractOp>, pub ops_runtime: Vec<etk_asm::ops::AbstractOp>, pub abi: EvmAbiResult, } pub type EvmAbiResult = Vec<ethabi::operation::Operation>; impl AsmBuilder for EvmAsmBuilder<'_, '_> { fn func_to_labels(&mut self, func: &Function) -> (Label, Label) { self.func_to_labels(func) } fn compile_function( &mut self, handler: &Handler, function: Function, ) -> Result<(), ErrorEmitted> { self.compile_function(handler, function) } fn compile_configurable(&mut self, _config: &ConfigContent) {} fn finalize( self, _handler: &Handler, _build_config: Option<&crate::BuildConfig>, _fallback_fn: Option<Label>, ) -> Result<FinalizedAsm, ErrorEmitted> { let mut global_ops = Vec::new(); let mut global_abi = Vec::new(); let mut size = 0; let mut it = self.sections.iter().peekable(); while let Some(section) = it.next() { size += section.size(); global_ops.append(&mut section.ops.clone()); global_abi.append(&mut 
section.abi.clone()); if it.peek().is_some() { size += AbstractOp::Op(Op::Invalid(etk_ops::london::Invalid)) .size() .unwrap(); global_ops.push(AbstractOp::Op(Op::Invalid(etk_ops::london::Invalid))); } } // First generate a dummy ctor section to calculate its size. let dummy = self.generate_constructor(false, size, 0); // Generate the actual ctor section with the correct size.. let mut ctor = self.generate_constructor(false, size, dummy.size()); ctor.ops.append(&mut global_ops); global_abi.append(&mut ctor.abi); let final_program = EvmFinalProgram { ops: ctor.ops.clone(), abi: global_abi, }; Ok(final_program.finalize()) } } #[allow(unused_variables)] #[allow(dead_code)] impl<'ir, 'eng> EvmAsmBuilder<'ir, 'eng> { pub fn new(program_kind: ProgramKind, context: &'ir Context<'eng>) -> Self { Self { program_kind, sections: Vec::new(), func_label_map: HashMap::new(), block_label_map: HashMap::new(), context, md_mgr: MetadataManager::default(), label_idx: 0, cur_section: None, } } fn generate_constructor( &self, is_payable: bool, data_size: usize, data_offset: usize, ) -> EvmAsmSection { // For more details and explanations see: // https://medium.com/@hayeah/diving-into-the-ethereum-vm-part-5-the-smart-contract-creation-process-cb7b6133b855. let mut s = EvmAsmSection::new(); self.setup_free_memory_pointer(&mut s); if is_payable { // Get the amount of ETH transferred to the contract by the parent contract, // or by a transaction and check for a non-payable contract. Revert if caller // sent ether. 
// // callvalue // dup1 // iszero // push1 0x0f // jumpi // push1 0x00 // dup1 // revert // jumpdest // pop s.ops.push(AbstractOp::new(Op::CallValue(CallValue))); s.ops.push(AbstractOp::new(Op::Dup1(Dup1))); s.ops.push(AbstractOp::new(Op::IsZero(IsZero))); let tag_label = "tag_1"; s.ops.push(AbstractOp::new(Op::Push1(Push1(Imm::with_label( tag_label, ))))); s.ops.push(AbstractOp::new(Op::JumpI(JumpI))); s.ops .push(AbstractOp::new(Op::Push1(Push1(Imm::with_expression( Expression::Terminal(0x00.into()), ))))); s.ops.push(AbstractOp::new(Op::Dup1(Dup1))); s.ops.push(AbstractOp::new(Op::Revert(Revert))); s.ops.push(AbstractOp::Label("tag_1".into())); s.ops.push(AbstractOp::new(Op::JumpDest(JumpDest))); s.ops.push(AbstractOp::Op(Op::Pop(Pop))); } self.copy_contract_code_to_memory(&mut s, data_size, data_offset); s.abi.push(ethabi::operation::Operation::Constructor( ethabi::Constructor { inputs: vec![] }, )); s } fn copy_contract_code_to_memory( &self, s: &mut EvmAsmSection, data_size: usize, data_offset: usize, ) { // Copy contract code into memory, and return. // push1 dataSize // dup1 // push1 dataOffset // push1 0x00 // codecopy // push1 0x00 // return s.ops.push(AbstractOp::Push(Imm::from(Terminal::Number( data_size.into(), )))); s.ops.push(AbstractOp::new(Op::Dup1(Dup1))); s.ops.push(AbstractOp::Push(Imm::from(Terminal::Number( data_offset.into(), )))); s.ops .push(AbstractOp::new(Op::Push1(Push1(Imm::with_expression( Expression::Terminal(0x00.into()), ))))); s.ops.push(AbstractOp::Op(Op::CodeCopy(CodeCopy))); s.ops .push(AbstractOp::new(Op::Push1(Push1(Imm::with_expression( Expression::Terminal(0x00.into()), ))))); s.ops.push(AbstractOp::Op(Op::Return(Return))); } fn setup_free_memory_pointer(&self, s: &mut EvmAsmSection) { // Setup the initial free memory pointer. // // The "free memory pointer" is stored at position 0x40 in memory. // The first 64 bytes of memory can be used as "scratch space" for short-term allocation. 
// The 32 bytes after the free memory pointer (i.e., starting at 0x60) are meant to be // zero permanently and is used as the initial value for empty dynamic memory arrays. // This means that the allocatable memory starts at 0x80, which is the initial value // of the free memory pointer. // // push1 0x80 // push1 0x40 // mstore s.ops .push(AbstractOp::new(Op::Push1(Push1(Imm::with_expression( Expression::Terminal(0x80.into()), ))))); s.ops .push(AbstractOp::new(Op::Push1(Push1(Imm::with_expression( Expression::Terminal(0x40.into()), ))))); s.ops.push(AbstractOp::new(Op::MStore(MStore))); } fn empty_span() -> Span { let msg = "unknown source location"; Span::new(msg.into(), 0, msg.len(), None).unwrap() } fn get_label(&mut self) -> Label { let next_val = self.label_idx; self.label_idx += 1; Label(self.label_idx) } pub(super) fn compile_instruction( &mut self, handler: &Handler, instr_val: &Value, func_is_entry: bool, ) -> Result<(), ErrorEmitted> { if let Some(instruction) = instr_val.get_instruction(self.context) { match &instruction.op { InstOp::AsmBlock(asm, args) => { self.compile_asm_block(handler, instr_val, asm, args)? } InstOp::BitCast(val, ty) => self.compile_bitcast(instr_val, val, ty), InstOp::UnaryOp { op, arg } => self.compile_unary_op(instr_val, op, arg), InstOp::BinaryOp { op, arg1, arg2 } => { self.compile_binary_op(instr_val, op, arg1, arg2) } InstOp::Branch(to_block) => self.compile_branch(to_block), InstOp::Call(func, args) => self.compile_call(instr_val, func, args), InstOp::CastPtr(val, ty) => self.compile_cast_ptr(instr_val, val, ty), InstOp::Cmp(pred, lhs_value, rhs_value) => { self.compile_cmp(instr_val, pred, lhs_value, rhs_value) } InstOp::ConditionalBranch { cond_value, true_block, false_block, } => { self.compile_conditional_branch(handler, cond_value, true_block, false_block)? } InstOp::ContractCall { params, coins, asset_id, gas, .. 
} => self.compile_contract_call(instr_val, params, coins, asset_id, gas), InstOp::FuelVm(fuel_vm_instr) => { handler.emit_err(CompileError::Internal( "Invalid FuelVM IR instruction provided to the EVM code gen.", self.md_mgr .val_to_span(self.context, *instr_val) .unwrap_or_else(Self::empty_span), )); } InstOp::GetElemPtr { base, elem_ptr_ty, indices, } => self.compile_get_elem_ptr(instr_val, base, elem_ptr_ty, indices), InstOp::GetLocal(local_var) => self.compile_get_local(instr_val, local_var), InstOp::GetGlobal(global_var) => self.compile_get_global(instr_val, global_var), InstOp::GetConfig(_, name) => self.compile_get_config(instr_val, name), InstOp::GetStorageKey(storage_key) => { self.compile_get_storage_key(instr_val, storage_key) } InstOp::IntToPtr(val, _) => self.compile_int_to_ptr(instr_val, val), InstOp::Load(src_val) => self.compile_load(handler, instr_val, src_val)?, InstOp::Alloc { ty, count } => self.compile_alloc(instr_val, ty, count), InstOp::MemCopyBytes { dst_val_ptr, src_val_ptr, byte_len, } => self.compile_mem_copy_bytes(instr_val, dst_val_ptr, src_val_ptr, *byte_len), InstOp::MemCopyVal { dst_val_ptr, src_val_ptr, } => self.compile_mem_copy_val(instr_val, dst_val_ptr, src_val_ptr), InstOp::MemClearVal { dst_val_ptr } => { todo!(); } InstOp::Nop => (), InstOp::PtrToInt(ptr_val, int_ty) => { self.compile_ptr_to_int(instr_val, ptr_val, int_ty) } InstOp::Ret(ret_val, ty) => { if func_is_entry { self.compile_ret_from_entry(instr_val, ret_val, ty) } else { self.compile_ret_from_call(instr_val, ret_val) } } InstOp::Store { dst_val_ptr: dst_val, stored_val, } => self.compile_store(handler, instr_val, dst_val, stored_val)?, } } else { handler.emit_err(CompileError::Internal( "Value not an instruction.", self.md_mgr .val_to_span(self.context, *instr_val) .unwrap_or_else(Self::empty_span), )); } Ok(()) } fn compile_asm_block( &mut self, handler: &Handler, instr_val: &Value, asm: &AsmBlock, asm_args: &[AsmArg], ) -> Result<(), ErrorEmitted> { todo!(); } 
fn compile_bitcast(&mut self, instr_val: &Value, bitcast_val: &Value, to_type: &Type) { todo!(); } fn compile_unary_op(&mut self, instr_val: &Value, op: &UnaryOpKind, arg: &Value) { todo!(); } fn compile_binary_op( &mut self, instr_val: &Value, op: &BinaryOpKind, arg1: &Value, arg2: &Value, ) { todo!(); } fn compile_branch(&mut self, to_block: &BranchToWithArgs) { todo!(); } fn compile_cast_ptr(&mut self, instr_val: &Value, val: &Value, ty: &Type) { todo!(); } fn compile_cmp( &mut self, instr_val: &Value, pred: &Predicate, lhs_value: &Value, rhs_value: &Value, ) { todo!(); } fn compile_conditional_branch( &mut self, handler: &Handler, cond_value: &Value, true_block: &BranchToWithArgs, false_block: &BranchToWithArgs, ) -> Result<(), ErrorEmitted> { todo!(); } fn compile_branch_to_phi_value(&mut self, to_block: &BranchToWithArgs) { todo!(); } #[allow(clippy::too_many_arguments)] fn compile_contract_call( &mut self, instr_val: &Value, params: &Value, coins: &Value, asset_id: &Value, gas: &Value, ) { todo!(); } fn compile_get_elem_ptr( &mut self, instr_val: &Value, base: &Value, elem_ptr_ty: &Type, indices: &[Value], ) { todo!(); } fn compile_get_global(&mut self, instr_val: &Value, global_var: &GlobalVar) { todo!(); } fn compile_get_local(&mut self, instr_val: &Value, local_var: &LocalVar) { todo!(); } fn compile_get_config(&mut self, instr_val: &Value, name: &str) { todo!(); } fn compile_get_storage_key(&mut self, instr_val: &Value, storage_key: &StorageKey) { todo!(); } fn compile_gtf(&mut self, instr_val: &Value, index: &Value, tx_field_id: u64) { todo!(); } fn compile_int_to_ptr(&mut self, instr_val: &Value, int_to_ptr_val: &Value) { todo!(); } fn compile_load( &mut self, handler: &Handler, instr_val: &Value, src_val: &Value, ) -> Result<(), ErrorEmitted> { todo!(); } fn compile_alloc(&mut self, instr_val: &Value, ty: &Type, count: &Value) { todo!(); } fn compile_log(&mut self, instr_val: &Value, log_val: &Value, log_ty: &Type, log_id: &Value) { todo!(); } fn 
compile_mem_copy_bytes( &mut self, instr_val: &Value, dst_val_ptr: &Value, src_val_ptr: &Value, byte_len: u64, ) { todo!(); } fn compile_mem_copy_val( &mut self, instr_val: &Value, dst_val_ptr: &Value, src_val_ptr: &Value, ) { todo!(); } fn compile_ptr_to_int(&mut self, instr_val: &Value, ptr_val: &Value, int_ty: &Type) { todo!(); } fn compile_read_register(&mut self, instr_val: &Value, reg: &sway_ir::Register) { todo!(); } fn compile_ret_from_entry(&mut self, instr_val: &Value, ret_val: &Value, ret_type: &Type) { if ret_type.is_unit(self.context) { // Unit returns should always be zero, although because they can be omitted from // functions, the register is sometimes uninitialized. Manually return zero in this // case. self.cur_section .as_mut() .unwrap() .ops .push(AbstractOp::Op(Op::Return(Return))); } else { todo!(); } } fn compile_revert(&mut self, instr_val: &Value, revert_val: &Value) { todo!(); } fn compile_smo( &mut self, instr_val: &Value, recipient_and_message: &Value, message_size: &Value, output_index: &Value, coins: &Value, ) { todo!(); } fn compile_state_access_quad_word( &mut self, handler: &Handler, instr_val: &Value, val: &Value, key: &Value, number_of_slots: &Value, access_type: StateAccessType, ) -> Result<(), ErrorEmitted> { todo!(); } fn compile_state_load_word( &mut self, handler: &Handler, instr_val: &Value, key: &Value, ) -> Result<(), ErrorEmitted> { todo!(); } fn compile_state_store_word( &mut self, handler: &Handler, instr_val: &Value, store_val: &Value, key: &Value, ) -> Result<(), ErrorEmitted> { todo!(); } fn compile_store( &mut self, handler: &Handler, instr_val: &Value, dst_val: &Value, stored_val: &Value, ) -> Result<(), ErrorEmitted> { todo!(); } pub(super) fn func_to_labels(&mut self, func: &Function) -> (Label, Label) { self.func_label_map.get(func).cloned().unwrap_or_else(|| { let labels = (self.get_label(), self.get_label()); self.func_label_map.insert(*func, labels); labels }) } pub fn compile_function( &mut self, handler: 
&Handler, function: Function, ) -> Result<(), ErrorEmitted> { self.cur_section = Some(EvmAsmSection::new()); // push1 0x80 // push1 0x40 // mstore self.cur_section .as_mut() .unwrap() .ops .push(AbstractOp::new(Op::Push1(Push1(Imm::with_expression( Expression::Terminal(0x80.into()), ))))); self.cur_section .as_mut() .unwrap() .ops .push(AbstractOp::new(Op::Push1(Push1(Imm::with_expression( Expression::Terminal(0x40.into()), ))))); self.cur_section .as_mut() .unwrap() .ops .push(AbstractOp::new(Op::MStore(MStore))); //self.init_locals(function); let func_is_entry = function.is_entry(self.context); // Compile instructions. for block in function.block_iter(self.context) { self.insert_block_label(block); for instr_val in block.instruction_iter(self.context) { self.compile_instruction(handler, &instr_val, func_is_entry)?; } } // push1 0x00 // dup1 // revert self.cur_section .as_mut() .unwrap() .ops .push(AbstractOp::new(Op::Push1(Push1(Imm::with_expression( Expression::Terminal(0x00.into()), ))))); self.cur_section .as_mut() .unwrap() .ops .push(AbstractOp::new(Op::Dup1(Dup1))); self.cur_section .as_mut() .unwrap() .ops .push(AbstractOp::new(Op::Revert(Revert))); // Generate the ABI. 
#[allow(deprecated)] self.cur_section .as_mut() .unwrap() .abi .push(ethabi::operation::Operation::Function(ethabi::Function { name: function.get_name(self.context).to_string(), inputs: vec![], outputs: vec![], constant: None, state_mutability: ethabi::StateMutability::NonPayable, })); self.sections.push(self.cur_section.take().unwrap()); self.cur_section = None; Ok(()) } pub(super) fn compile_call(&mut self, instr_val: &Value, function: &Function, args: &[Value]) { todo!(); } pub(super) fn compile_ret_from_call(&mut self, instr_val: &Value, ret_val: &Value) { todo!(); } pub(super) fn insert_block_label(&mut self, block: Block) { if &block.get_label(self.context) != "entry" { let label = self.block_to_label(&block); self.cur_section .as_mut() .unwrap() .ops .push(AbstractOp::Label(label.to_string())); } } fn block_to_label(&mut self, block: &Block) -> Label { self.block_label_map.get(block).cloned().unwrap_or_else(|| { let label = self.get_label(); self.block_label_map.insert(*block, label); label }) } } struct EvmFinalProgram { ops: Vec<etk_asm::ops::AbstractOp>, abi: Vec<ethabi::operation::Operation>, } impl EvmFinalProgram { fn finalize(self) -> FinalizedAsm { FinalizedAsm { data_section: DataSection { ..Default::default() }, program_section: InstructionSet::Evm { ops: self.ops }, program_kind: ProgramKind::Script, entries: vec![], abi: Some(ProgramABI::Evm(self.abi)), } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/abstract_instruction_set.rs
sway-core/src/asm_generation/fuel/abstract_instruction_set.rs
use sway_error::error::CompileError; use crate::asm_lang::{allocated_ops::AllocatedOp, Op, RealizedOp}; use std::fmt; use super::{ allocated_abstract_instruction_set::AllocatedAbstractInstructionSet, register_allocator, }; /// An [AbstractInstructionSet] is a set of instructions that use entirely virtual registers /// and excessive moves, with the intention of later optimizing it. #[derive(Clone)] pub struct AbstractInstructionSet { pub(crate) ops: Vec<Op>, } impl AbstractInstructionSet { /// Allocate registers. pub(crate) fn allocate_registers( self, ) -> Result<AllocatedAbstractInstructionSet, CompileError> { register_allocator::allocate_registers(&self.ops) } } impl fmt::Display for AbstractInstructionSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, ".program:\n{}", self.ops .iter() .map(|x| format!("{x}")) .collect::<Vec<_>>() .join("\n") ) } } /// "Realized" here refers to labels -- there are no more organizational /// ops or labels. In this struct, they are all "realized" to offsets. pub struct RealizedAbstractInstructionSet { pub(super) ops: Vec<RealizedOp>, } impl RealizedAbstractInstructionSet { pub(crate) fn allocated_ops(self) -> Vec<AllocatedOp> { self.ops .into_iter() .map( |RealizedOp { opcode, comment, owning_span, }| { AllocatedOp { opcode, comment, owning_span, } }, ) .collect::<Vec<_>>() } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/allocated_abstract_instruction_set.rs
sway-core/src/asm_generation/fuel/allocated_abstract_instruction_set.rs
use crate::{ asm_generation::fuel::data_section::EntryName, asm_lang::{ allocated_ops::{AllocatedInstruction, AllocatedRegister}, AllocatedAbstractOp, ConstantRegister, ControlFlowOp, JumpType, Label, RealizedOp, VirtualImmediate12, VirtualImmediate18, VirtualImmediate24, }, }; use super::{ abstract_instruction_set::RealizedAbstractInstructionSet, compiler_constants as consts, data_section::{DataSection, Entry}, }; use fuel_vm::prelude::Instruction; use indexmap::{IndexMap, IndexSet}; use rustc_hash::FxHashMap; use sway_types::span::Span; use std::collections::{BTreeSet, HashMap}; use either::Either; // Convenience type for representing a map from a label to its offset and number of instructions // following it until the next label (i.e., the length of the basic block). pub(crate) type LabeledBlocks = HashMap<Label, BasicBlock>; #[derive(Clone, Copy, Debug)] pub(crate) struct BasicBlock { pub(crate) offs: u64, } #[derive(Clone)] pub struct AllocatedAbstractInstructionSet { pub(crate) ops: Vec<AllocatedAbstractOp>, } impl AllocatedAbstractInstructionSet { pub(crate) fn optimize(self) -> AllocatedAbstractInstructionSet { self.remove_redundant_ops() } fn remove_redundant_ops(mut self) -> AllocatedAbstractInstructionSet { self.ops.retain(|op| { // It is easier to think in terms of operations we want to remove // than the operations we want to retain ;-) let remove = match &op.opcode { // `cfei i0` and `cfsi i0` pairs. Either::Left(AllocatedInstruction::CFEI(imm)) | Either::Left(AllocatedInstruction::CFSI(imm)) => imm.value() == 0u32, // `cfe $zero` and `cfs $zero` pairs. Either::Left(AllocatedInstruction::CFE(reg)) | Either::Left(AllocatedInstruction::CFS(reg)) => reg.is_zero(), _ => false, }; !remove }); self } /// Replace each PUSHA instruction with stores of all used registers to the stack, and each /// POPA with respective loads from the stack. 
/// /// Typically there will be only one of each but the code here allows for nested sections or /// even overlapping sections. pub(crate) fn emit_pusha_popa(mut self) -> Self { // Gather the sets of used registers per section. Using a fold here because it's actually // simpler to manage. We use a HashSet to keep track of the active section labels and then // build a HashMap of Label to HashSet of registers. let reg_sets = self .ops .iter() .fold( (IndexMap::new(), IndexSet::new()), |(mut reg_sets, mut active_sets), op| { let regs: Box<dyn Iterator<Item = &AllocatedRegister>> = match &op.opcode { Either::Right(ControlFlowOp::PushAll(label)) => { active_sets.insert(*label); Box::new(std::iter::empty()) } Either::Right(ControlFlowOp::PopAll(label)) => { active_sets.swap_remove(label); Box::new(std::iter::empty()) } Either::Left(alloc_op) => Box::new(alloc_op.def_registers().into_iter()), Either::Right(ctrl_op) => Box::new(ctrl_op.def_registers().into_iter()), }; for reg in regs { for active_label in active_sets.clone() { reg_sets .entry(active_label) .and_modify(|regs: &mut BTreeSet<AllocatedRegister>| { regs.insert(reg.clone()); }) .or_insert_with(|| { BTreeSet::from_iter(std::iter::once(reg).cloned()) }); } } (reg_sets, active_sets) }, ) .0; fn generate_mask(regs: &[&AllocatedRegister]) -> (VirtualImmediate24, VirtualImmediate24) { let mask = regs.iter().fold((0, 0), |mut accum, reg| { let reg_id = reg.to_reg_id().to_u8(); assert!((16..64).contains(&reg_id)); let reg_id = reg_id - 16; let (mask_ref, bit) = if reg_id < 24 { (&mut accum.0, reg_id) } else { (&mut accum.1, reg_id - 24) }; // Set bit (from the least significant side) of mask_ref. *mask_ref |= 1 << bit; accum }); ( VirtualImmediate24::try_new(mask.0, Span::dummy()) .expect("mask should have fit in 24b"), VirtualImmediate24::try_new(mask.1, Span::dummy()) .expect("mask should have fit in 24b"), ) } // Now replace the PUSHA/POPA instructions with STOREs and LOADs. 
self.ops = self.ops.drain(..).fold(Vec::new(), |mut new_ops, op| { match &op.opcode { Either::Right(ControlFlowOp::PushAll(label)) => { let regs = reg_sets .get(label) .expect("Have collected registers above.") .iter() .filter(|reg| matches!(reg, AllocatedRegister::Allocated(_))) .chain([&AllocatedRegister::Constant(ConstantRegister::LocalsBase)]) .collect::<Vec<_>>(); let (mask_l, mask_h) = generate_mask(&regs); if mask_l.value() != 0 { new_ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::PSHL(mask_l)), comment: "save registers 16..40".into(), owning_span: op.owning_span.clone(), }); } if mask_h.value() != 0 { new_ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::PSHH(mask_h)), comment: "save registers 40..64".into(), owning_span: op.owning_span.clone(), }); } } Either::Right(ControlFlowOp::PopAll(label)) => { let regs = reg_sets .get(label) .expect("Have collected registers above.") .iter() .filter(|reg| matches!(reg, AllocatedRegister::Allocated(_))) .chain([&AllocatedRegister::Constant(ConstantRegister::LocalsBase)]) .collect::<Vec<_>>(); let (mask_l, mask_h) = generate_mask(&regs); if mask_h.value() != 0 { new_ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::POPH(mask_h)), comment: "restore registers 40..64".into(), owning_span: op.owning_span.clone(), }); } if mask_l.value() != 0 { new_ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::POPL(mask_l)), comment: "restore registers 16..40".into(), owning_span: op.owning_span.clone(), }); } } _otherwise => new_ops.push(op), }; new_ops }); self } /// Runs two passes -- one to get the instruction offsets of the labels and one to replace the /// labels in the organizational ops pub(crate) fn realize_labels( mut self, data_section: &mut DataSection, far_jump_sizes: &FxHashMap<usize, u64>, ) -> Result<(RealizedAbstractInstructionSet, LabeledBlocks), crate::CompileError> { let label_offsets = 
self.resolve_labels(data_section); let mut curr_offset = 0; let mut realized_ops = vec![]; for (op_idx, op) in self.ops.iter().enumerate() { let op_size = far_jump_sizes .get(&op_idx) .copied() .unwrap_or_else(|| Self::instruction_size_not_far_jump(op, data_section)); let AllocatedAbstractOp { opcode, comment, owning_span, } = op.clone(); match opcode { Either::Left(op) => realized_ops.push(RealizedOp { opcode: op, owning_span, comment, }), Either::Right(org_op) => match org_op { ControlFlowOp::Jump { to, type_ } => { let target_offset = label_offsets.get(&to).unwrap().offs; let ops = if matches!(type_, JumpType::Call) { compile_call( data_section, curr_offset, target_offset, far_jump_sizes.get(&op_idx).copied(), comment, owning_span, ) } else { compile_jump( data_section, curr_offset, target_offset, match type_ { JumpType::NotZero(cond) => Some(cond), _ => None, }, far_jump_sizes.contains_key(&op_idx), comment, owning_span, ) }; debug_assert_eq!(ops.len() as u64, op_size); realized_ops.extend(ops); } ControlFlowOp::DataSectionOffsetPlaceholder => { realized_ops.push(RealizedOp { opcode: AllocatedInstruction::DataSectionOffsetPlaceholder, owning_span: None, comment: String::new(), }); } ControlFlowOp::ConfigurablesOffsetPlaceholder => { realized_ops.push(RealizedOp { opcode: AllocatedInstruction::ConfigurablesOffsetPlaceholder, owning_span: None, comment: String::new(), }); } ControlFlowOp::Comment => continue, ControlFlowOp::Label(..) => continue, ControlFlowOp::PushAll(_) | ControlFlowOp::PopAll(_) => { unreachable!("still don't belong in organisational ops") } }, }; curr_offset += op_size; } let realized_ops = RealizedAbstractInstructionSet { ops: realized_ops }; Ok((realized_ops, label_offsets)) } /// Resolve jump label offsets. /// /// For very large programs the label offsets may be too large to fit in an immediate part /// of the jump instruction. In these case we must use a register value as a jump target. 
    /// This requires two instructions, one to load the destination register and then the jump itself.
    ///
    /// But we don't know the offset of a label until we scan through the ops and count them.
    /// So we have a chicken and egg situation where we may need to add new instructions which
    /// would change the offsets to all labels thereafter, which in turn could require more
    /// instructions to be added, and so on.
    ///
    /// For this reason, we take a two-pass approach. On the first pass, we pessimistically assume
    /// that all jumps may take two opcodes, and use this assumption to calculate the
    /// offsets of labels. Then we see which jumps actually require two opcodes and mark them as such.
    /// This approach is not optimal as it sometimes requires more opcodes than necessary,
    /// but it is simple and works quite well in practice.
    fn resolve_labels(&mut self, data_section: &mut DataSection) -> LabeledBlocks {
        // Pass 1: decide which jumps must be treated as far, using worst-case sizes.
        let far_jump_indices = self.collect_far_jumps();
        // Pass 2: compute label offsets, accounting for the reserved far-jump sizes.
        self.map_label_offsets(data_section, &far_jump_indices)
    }

    // Returns the largest size an instruction can take up.
    // The return value is in concrete instructions, i.e. units of 4 bytes.
    fn worst_case_instruction_size(op: &AllocatedAbstractOp) -> u64 {
        use ControlFlowOp::*;
        match op.opcode {
            // Labels emit no code.
            Either::Right(Label(_)) => 0,

            // Loads from data section may take up to 2 instructions
            Either::Left(
                AllocatedInstruction::LoadDataId(_, _) | AllocatedInstruction::AddrDataId(_, _),
            ) => 2,

            // cfei 0 and cfsi 0 are omitted from asm emission, don't count them for offsets
            Either::Left(AllocatedInstruction::CFEI(ref op))
            | Either::Left(AllocatedInstruction::CFSI(ref op))
                if op.value() == 0 =>
            {
                0
            }

            // Another special case for the blob opcode, used for testing.
            Either::Left(AllocatedInstruction::BLOB(ref count)) => count.value() as u64,

            // This is a concrete op, size is fixed
            Either::Left(_) => 1,

            // Worst case for jump is 2 opcodes, and 3 for calls
            Either::Right(Jump {
                ref type_, ..
            }) => match type_ {
                JumpType::Unconditional => 2,
                JumpType::NotZero(_) => 2,
                JumpType::Call => 3,
            },

            Either::Right(Comment) => 0,

            Either::Right(DataSectionOffsetPlaceholder) => {
                // If the placeholder is 32 bits, this is 1. If 64, this should be 2. We use LW
                // to load the data, which loads a whole word, so for now this is 2.
                2
            }

            Either::Right(ConfigurablesOffsetPlaceholder) => 2,

            Either::Right(PushAll(_)) | Either::Right(PopAll(_)) => unreachable!(
                "fix me, pushall and popall don't really belong in control flow ops \
                since they're not about control flow"
            ),
        }
    }

    // Actual size of an instruction.
    // Note that this returns incorrect values for far jumps, they must be handled separately.
    // The return value is in concrete instructions, i.e. units of 4 bytes.
    fn instruction_size_not_far_jump(op: &AllocatedAbstractOp, data_section: &DataSection) -> u64 {
        use ControlFlowOp::*;
        match op.opcode {
            Either::Right(Label(_)) => 0,

            // A special case for LoadDataId which may be 1 or 2 ops, depending on the source size.
            Either::Left(AllocatedInstruction::LoadDataId(_, ref data_id)) => {
                let has_copy_type = data_section.has_copy_type(data_id).expect(
                    "Internal miscalculation in data section -- \
                    data id did not match up to any actual data",
                );
                if has_copy_type {
                    1
                } else {
                    2
                }
            }

            Either::Left(AllocatedInstruction::AddrDataId(_, ref _data_id)) => 2,

            // cfei 0 and cfsi 0 are omitted from asm emission, don't count them for offsets
            Either::Left(AllocatedInstruction::CFEI(ref op))
            | Either::Left(AllocatedInstruction::CFSI(ref op))
                if op.value() == 0 =>
            {
                0
            }

            // Another special case for the blob opcode, used for testing.
            Either::Left(AllocatedInstruction::BLOB(ref count)) => count.value() as u64,

            // This is a concrete op, size is fixed
            Either::Left(_) => 1,

            // Far jumps must be handled separately, as they require two instructions.
            Either::Right(Jump { .. }) => 1,

            Either::Right(Comment) => 0,

            Either::Right(DataSectionOffsetPlaceholder) => {
                // If the placeholder is 32 bits, this is 1. If 64, this should be 2. We use LW
                // to load the data, which loads a whole word, so for now this is 2.
                2
            }

            Either::Right(ConfigurablesOffsetPlaceholder) => 2,

            Either::Right(PushAll(_)) | Either::Right(PopAll(_)) => unreachable!(
                "fix me, pushall and popall don't really belong in control flow ops \
                since they're not about control flow"
            ),
        }
    }

    /// Go through all jumps and check if they could require a far jump in the worst case.
    /// For far jumps we have to reserve space for an extra opcode to load the target address.
    /// For far calls, we need to reserve two extra opcodes.
    /// Also, this will mark self-jumps, as they require a NOOP to be inserted before them.
    pub(crate) fn collect_far_jumps(&self) -> FxHashMap<usize, u64> {
        let mut labelled_blocks = LabeledBlocks::new();
        let mut cur_offset = 0;
        let mut cur_basic_block = None;

        // Reserved extra sizes, keyed by the index of the jump op.
        let mut far_jump_sizes = FxHashMap::default();

        struct JumpInfo {
            to: Label,
            offset: u64,
            op_idx: usize,
        }
        let mut jumps = Vec::new();

        for (op_idx, op) in self.ops.iter().enumerate() {
            // If we're seeing a control flow op then it's the end of the block.
            if let Either::Right(ControlFlowOp::Label(_) | ControlFlowOp::Jump { .. }) = op.opcode {
                if let Some((lab, _idx, offs)) = cur_basic_block {
                    // Insert the previous basic block.
                    labelled_blocks.insert(lab, BasicBlock { offs });
                }
            }

            if let Either::Right(ControlFlowOp::Label(cur_lab)) = op.opcode {
                // Save the new block label and furthest offset.
                cur_basic_block = Some((cur_lab, op_idx, cur_offset));
            }

            if let Either::Right(ControlFlowOp::Jump { to, .. }) = &op.opcode {
                jumps.push(JumpInfo {
                    to: *to,
                    offset: cur_offset,
                    op_idx,
                });
            }

            // Update the offset.
            cur_offset += Self::worst_case_instruction_size(op);
        }

        // Don't forget the final block.
        if let Some((lab, _idx, offs)) = cur_basic_block {
            labelled_blocks.insert(lab, BasicBlock { offs });
        }

        for jump in jumps {
            let offs = labelled_blocks.get(&jump.to).unwrap().offs;
            // Distance between jump and target under the worst-case sizing.
            let rel_offset = offs.abs_diff(jump.offset);

            let Either::Right(ControlFlowOp::Jump { ref type_, .. }) = self.ops[jump.op_idx].opcode
            else {
                unreachable!("Jump info should only be collected for jumps");
            };

            // Relative self jumps need a NOOP inserted before it so that we can jump to the NOOP.
            let is_self_jump = rel_offset == 0;

            match type_ {
                JumpType::Unconditional => {
                    // Unconditional jumps have an 18-bit immediate offset
                    if is_self_jump || rel_offset > consts::EIGHTEEN_BITS {
                        far_jump_sizes.insert(jump.op_idx, 2);
                    }
                }
                JumpType::NotZero(_) => {
                    // Conditional jumps have a 12-bit immediate offset
                    if is_self_jump || rel_offset > consts::TWELVE_BITS {
                        far_jump_sizes.insert(jump.op_idx, 2);
                    }
                }
                JumpType::Call => {
                    // Use the actual codegen to estimate the size of the call.
                    // This can never generate a number that's too small, but in some
                    // corner cases it leads to reserving an extra opcode.
                    // See `compile_call` that inserts NOOPs to pad the call in these cases.
                    let len = compile_call_inner(
                        &mut DataSection::default(),
                        jump.offset,
                        offs,
                        String::new(),
                        None,
                    )
                    .len();
                    far_jump_sizes.insert(jump.op_idx, len as u64);
                }
            };
        }

        far_jump_sizes
    }

    /// Map the labels to their offsets in the program.
    /// Uses `far_jump_sizes` (computed by `collect_far_jumps`) so that label offsets
    /// account for the extra opcodes reserved for far jumps.
    fn map_label_offsets(
        &self,
        data_section: &DataSection,
        far_jump_sizes: &FxHashMap<usize, u64>,
    ) -> LabeledBlocks {
        let mut labelled_blocks = LabeledBlocks::new();
        let mut cur_offset = 0;
        let mut cur_basic_block = None;

        for (op_idx, op) in self.ops.iter().enumerate() {
            // If we're seeing a control flow op then it's the end of the block.
            if let Either::Right(ControlFlowOp::Label(_) | ControlFlowOp::Jump { .. }) = op.opcode {
                if let Some((lab, _idx, offs)) = cur_basic_block {
                    // Insert the previous basic block.
labelled_blocks.insert(lab, BasicBlock { offs }); } } if let Either::Right(ControlFlowOp::Label(cur_lab)) = op.opcode { // Save the new block label and furthest offset. cur_basic_block = Some((cur_lab, op_idx, cur_offset)); } // Update the offset. let op_size = far_jump_sizes .get(&op_idx) .copied() .unwrap_or_else(|| Self::instruction_size_not_far_jump(op, data_section)); cur_offset += op_size; } // Don't forget the final block. if let Some((lab, _idx, offs)) = cur_basic_block { labelled_blocks.insert(lab, BasicBlock { offs }); } labelled_blocks } } impl std::fmt::Display for AllocatedAbstractInstructionSet { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, ".program:\n{}", self.ops .iter() .map(|op| format!("{op}")) .collect::<Vec<_>>() .join("\n") ) } } /// Compiles jump into the appropriate operations. /// Near jumps are compiled into a single instruction, while far jumps are compiled into /// two instructions: one to load the target address and another to jump to it. 
pub(crate) fn compile_jump(
    data_section: &mut DataSection,
    curr_offset: u64,
    target_offset: u64,
    condition_nonzero: Option<AllocatedRegister>,
    far: bool,
    comment: String,
    owning_span: Option<Span>,
) -> Vec<RealizedOp> {
    // Self jump: `collect_far_jumps` always marks these as far, since a relative
    // jump-to-self needs a NOOP inserted before it to serve as the jump target.
    if curr_offset == target_offset {
        if !far {
            unreachable!("Self jump should have been marked by mark_far_jumps");
        }
        return vec![
            RealizedOp {
                opcode: AllocatedInstruction::NOOP,
                owning_span: owning_span.clone(),
                comment: "".into(),
            },
            // Jump backwards by 0, i.e. to the NOOP just emitted.
            if let Some(cond_nz) = condition_nonzero {
                RealizedOp {
                    opcode: AllocatedInstruction::JNZB(
                        cond_nz,
                        AllocatedRegister::Constant(ConstantRegister::Zero),
                        VirtualImmediate12::new(0),
                    ),
                    owning_span,
                    comment,
                }
            } else {
                RealizedOp {
                    opcode: AllocatedInstruction::JMPB(
                        AllocatedRegister::Constant(ConstantRegister::Zero),
                        VirtualImmediate18::new(0),
                    ),
                    owning_span,
                    comment,
                }
            },
        ];
    }

    // Backward jump: target is earlier in the program.
    // NOTE(review): the -1 here matches the relative encoding of JMPB/JNZB -- confirm
    // against the FuelVM instruction set spec.
    if curr_offset > target_offset {
        let delta = curr_offset - target_offset - 1;
        return if far {
            let data_id = data_section.insert_data_value(Entry::new_word(
                delta + 1, // +1 since the load instruction must be skipped as well
                EntryName::NonConfigurable,
                None,
            ));
            vec![
                RealizedOp {
                    opcode: AllocatedInstruction::LoadDataId(
                        AllocatedRegister::Constant(ConstantRegister::Scratch),
                        data_id,
                    ),
                    owning_span: owning_span.clone(),
                    comment: "load far jump target address".into(),
                },
                // Jump by the register-held distance; the immediate is 0.
                RealizedOp {
                    opcode: if let Some(cond_nz) = condition_nonzero {
                        AllocatedInstruction::JNZB(
                            cond_nz,
                            AllocatedRegister::Constant(ConstantRegister::Scratch),
                            VirtualImmediate12::new(0),
                        )
                    } else {
                        AllocatedInstruction::JMPB(
                            AllocatedRegister::Constant(ConstantRegister::Scratch),
                            VirtualImmediate18::new(0),
                        )
                    },
                    owning_span,
                    comment,
                },
            ]
        } else {
            // Near backward jump: the distance fits into the immediate.
            vec![RealizedOp {
                opcode: if let Some(cond_nz) = condition_nonzero {
                    AllocatedInstruction::JNZB(
                        cond_nz,
                        AllocatedRegister::Constant(ConstantRegister::Zero),
                        VirtualImmediate12::new(delta),
                    )
                } else {
                    AllocatedInstruction::JMPB(
                        AllocatedRegister::Constant(ConstantRegister::Zero),
                        VirtualImmediate18::new(delta),
                    )
                },
                owning_span,
                comment,
            }]
        };
    }

    // Forward jump: target is later in the program.
    let delta = target_offset - curr_offset - 1;

    if far {
        let data_id = data_section.insert_data_value(Entry::new_word(
            // -1: the extra load instruction sits between the jump and the target,
            // shortening the remaining forward distance (counterpart of the +1 in the
            // backward case above). -- presumably; verify against the backward branch.
            delta - 1,
            EntryName::NonConfigurable,
            None,
        ));
        vec![
            RealizedOp {
                opcode: AllocatedInstruction::LoadDataId(
                    AllocatedRegister::Constant(ConstantRegister::Scratch),
                    data_id,
                ),
                owning_span: owning_span.clone(),
                comment: "load far jump target address".into(),
            },
            RealizedOp {
                opcode: if let Some(cond_nz) = condition_nonzero {
                    AllocatedInstruction::JNZF(
                        cond_nz,
                        AllocatedRegister::Constant(ConstantRegister::Scratch),
                        VirtualImmediate12::new(0),
                    )
                } else {
                    AllocatedInstruction::JMPF(
                        AllocatedRegister::Constant(ConstantRegister::Scratch),
                        VirtualImmediate18::new(0),
                    )
                },
                owning_span,
                comment,
            },
        ]
    } else {
        // Near forward jump: the distance fits into the immediate.
        vec![RealizedOp {
            opcode: if let Some(cond_nz) = condition_nonzero {
                AllocatedInstruction::JNZF(
                    cond_nz,
                    AllocatedRegister::Constant(ConstantRegister::Zero),
                    VirtualImmediate12::new(delta),
                )
            } else {
                AllocatedInstruction::JMPF(
                    AllocatedRegister::Constant(ConstantRegister::Zero),
                    VirtualImmediate18::new(delta),
                )
            },
            owning_span,
            comment,
        }]
    }
}

/// Compiles a function call into the appropriate operations.
/// Generates 1 to 3 instructions depending on the distance to the target.
pub(crate) fn compile_call_inner( data_section: &mut DataSection, curr_offset: u64, target_offset: u64, comment: String, owning_span: Option<Span>, ) -> Vec<RealizedOp> { // Handle forwards and backwards jumps separately if curr_offset <= target_offset { let delta = target_offset - curr_offset; // If the offset is small enough for a single instruction, do it directly if let Ok(imm) = VirtualImmediate12::try_new(delta, Span::dummy()) { return vec![RealizedOp { opcode: AllocatedInstruction::JAL( AllocatedRegister::Constant(ConstantRegister::CallReturnAddress), AllocatedRegister::Constant(ConstantRegister::ProgramCounter), imm, ), owning_span, comment, }]; } // The next approaches require an extra instruction before the PC is used, so we // subtract 1 from the delta to account for that. It cannot underflow as otherwise the first // approach would have been used. Then we multiply by instruction size for doing arithmetic // with the PC register. The overflow cannot occur since programs cannot be 2**60 bytes large. let delta_instr = (delta - 1) * (Instruction::SIZE as u64); // Attempt MOVI-based approach, that has larger immediate size but doesn't require data section. 
if let Ok(imm) = VirtualImmediate18::try_new(delta_instr, Span::dummy()) { return vec![ RealizedOp { opcode: AllocatedInstruction::MOVI( AllocatedRegister::Constant(ConstantRegister::Scratch), imm, ), owning_span: owning_span.clone(), comment: "load call target address".into(), }, RealizedOp { opcode: AllocatedInstruction::ADD( AllocatedRegister::Constant(ConstantRegister::Scratch), AllocatedRegister::Constant(ConstantRegister::ProgramCounter), AllocatedRegister::Constant(ConstantRegister::Scratch), ), owning_span: owning_span.clone(), comment: "load call target address".into(), }, RealizedOp { opcode: AllocatedInstruction::JAL( AllocatedRegister::Constant(ConstantRegister::CallReturnAddress), AllocatedRegister::Constant(ConstantRegister::Scratch), VirtualImmediate12::new(0), ), owning_span, comment, }, ]; } // if the offset is too large for MOVI, use data section to store the full offset. let data_id = data_section.insert_data_value(Entry::new_word( delta_instr, EntryName::NonConfigurable, None, )); return vec![ RealizedOp { opcode: AllocatedInstruction::LoadDataId( AllocatedRegister::Constant(ConstantRegister::Scratch), data_id, ), owning_span: owning_span.clone(), comment: "load call target address".into(), }, RealizedOp { opcode: AllocatedInstruction::ADD( AllocatedRegister::Constant(ConstantRegister::Scratch), AllocatedRegister::Constant(ConstantRegister::ProgramCounter), AllocatedRegister::Constant(ConstantRegister::Scratch),
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/data_section.rs
sway-core/src/asm_generation/fuel/data_section.rs
use rustc_hash::FxHashMap; use sway_ir::{ size_bytes_round_up_to_word_alignment, ConstantContent, ConstantValue, Context, Padding, }; use std::fmt; #[derive(Clone, Debug, PartialEq, Eq, serde::Serialize)] pub enum EntryName { NonConfigurable, Configurable(String), } impl fmt::Display for EntryName { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { EntryName::NonConfigurable => write!(f, "NonConfigurable"), EntryName::Configurable(name) => write!(f, "<Configurable, {name}>"), } } } // An entry in the data section. It's important for the size to be correct, especially for unions // where the size could be larger than the represented value. #[derive(Clone, Debug, serde::Serialize)] pub struct Entry { pub value: Datum, pub padding: Padding, pub name: EntryName, } #[derive(Clone, Debug, serde::Serialize)] pub enum Datum { Byte(u8), Word(u64), ByteArray(Vec<u8>), Slice(Vec<u8>), Collection(Vec<Entry>), } impl Entry { pub(crate) fn new_byte(value: u8, name: EntryName, padding: Option<Padding>) -> Entry { Entry { value: Datum::Byte(value), padding: padding.unwrap_or(Padding::default_for_u8(value)), name, } } pub(crate) fn new_word(value: u64, name: EntryName, padding: Option<Padding>) -> Entry { Entry { value: Datum::Word(value), padding: padding.unwrap_or(Padding::default_for_u64(value)), name, } } pub(crate) fn new_byte_array( bytes: Vec<u8>, name: EntryName, padding: Option<Padding>, ) -> Entry { Entry { padding: padding.unwrap_or(Padding::default_for_byte_array(&bytes)), value: Datum::ByteArray(bytes), name, } } pub(crate) fn new_slice(bytes: Vec<u8>, name: EntryName, padding: Option<Padding>) -> Entry { Entry { padding: padding.unwrap_or(Padding::default_for_byte_array(&bytes)), value: Datum::Slice(bytes), name, } } pub(crate) fn new_collection( elements: Vec<Entry>, name: EntryName, padding: Option<Padding>, ) -> Entry { Entry { padding: padding.unwrap_or(Padding::default_for_aggregate( elements.iter().map(|el| el.padding.target_size()).sum(), 
)), value: Datum::Collection(elements), name, } } pub(crate) fn from_constant( context: &Context, constant: &ConstantContent, name: EntryName, padding: Option<Padding>, ) -> Entry { // We need a special handling in case of enums. if constant.ty.is_enum(context) { let (tag, value) = constant .enum_tag_and_value_with_paddings(context) .expect("Constant is an enum."); let tag_entry = Entry::from_constant(context, tag.0, EntryName::NonConfigurable, tag.1); let value_entry = Entry::from_constant(context, value.0, EntryName::NonConfigurable, value.1); return Entry::new_collection(vec![tag_entry, value_entry], name, padding); } // Not an enum, no more special handling required. match &constant.value { ConstantValue::Undef | ConstantValue::Unit => Entry::new_byte(0, name, padding), ConstantValue::Bool(value) => Entry::new_byte(u8::from(*value), name, padding), ConstantValue::Uint(value) => { if constant.ty.is_uint8(context) { Entry::new_byte(*value as u8, name, padding) } else { Entry::new_word(*value, name, padding) } } ConstantValue::U256(value) => { Entry::new_byte_array(value.to_be_bytes().to_vec(), name, padding) } ConstantValue::B256(value) => { Entry::new_byte_array(value.to_be_bytes().to_vec(), name, padding) } ConstantValue::String(bytes) => Entry::new_byte_array(bytes.clone(), name, padding), ConstantValue::Array(_) => Entry::new_collection( constant .array_elements_with_padding(context) .expect("Constant is an array.") .into_iter() .map(|(elem, padding)| { Entry::from_constant(context, elem, EntryName::NonConfigurable, padding) }) .collect(), name, padding, ), ConstantValue::Struct(_) => Entry::new_collection( constant .struct_fields_with_padding(context) .expect("Constant is a struct.") .into_iter() .map(|(elem, padding)| { Entry::from_constant(context, elem, EntryName::NonConfigurable, padding) }) .collect(), name, padding, ), ConstantValue::RawUntypedSlice(bytes) => Entry::new_slice(bytes.clone(), name, padding), ConstantValue::Reference(_) => { 
todo!("Constant references are currently not supported.") } ConstantValue::Slice(_) => { todo!("Constant slices are currently not supported.") } } } /// Converts a literal to a big-endian representation. This is padded to words. pub(crate) fn to_bytes(&self) -> Vec<u8> { // Get the big-endian byte representation of the basic value. let bytes = match &self.value { Datum::Byte(value) => vec![*value], Datum::Word(value) => value.to_be_bytes().to_vec(), Datum::ByteArray(bytes) | Datum::Slice(bytes) if bytes.len() % 8 == 0 => bytes.clone(), Datum::ByteArray(bytes) | Datum::Slice(bytes) => bytes .iter() .chain([0; 8].iter()) .copied() .take((bytes.len() + 7) & 0xfffffff8_usize) .collect(), Datum::Collection(items) => items.iter().flat_map(|el| el.to_bytes()).collect(), }; let final_padding = self.padding.target_size().saturating_sub(bytes.len()); match self.padding { Padding::Left { .. } => { [std::iter::repeat_n(0u8, final_padding).collect(), bytes].concat() } Padding::Right { .. } => { [bytes, std::iter::repeat_n(0u8, final_padding).collect()].concat() } } } pub(crate) fn has_copy_type(&self) -> bool { matches!(self.value, Datum::Word(_) | Datum::Byte(_)) } pub(crate) fn is_byte(&self) -> bool { matches!(self.value, Datum::Byte(_)) } pub(crate) fn equiv(&self, entry: &Entry) -> bool { fn equiv_data(lhs: &Datum, rhs: &Datum) -> bool { match (lhs, rhs) { (Datum::Byte(l), Datum::Byte(r)) => l == r, (Datum::Word(l), Datum::Word(r)) => l == r, (Datum::ByteArray(l), Datum::ByteArray(r)) => l == r, (Datum::Collection(l), Datum::Collection(r)) => { l.len() == r.len() && l.iter() .zip(r.iter()) .all(|(l, r)| equiv_data(&l.value, &r.value)) } _ => false, } } // If this corresponds to a configuration-time constants, then the entry names will be // available (i.e. `Some(..)`) and they must be the same before we can merge the two // entries. 
Otherwise, `self.name` and `entry.name` will be `None` in which case we're also // allowed to merge the two entries (if their values are equivalent of course). equiv_data(&self.value, &entry.value) && self.name == entry.name } } #[derive(Clone, Debug)] pub enum DataIdEntryKind { NonConfigurable, Configurable, } impl fmt::Display for DataIdEntryKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { DataIdEntryKind::NonConfigurable => write!(f, "NonConfigurable"), DataIdEntryKind::Configurable => write!(f, "Configurable"), } } } /// An address which refers to a value in the data section of the asm. #[derive(Clone, Debug)] pub(crate) struct DataId { pub(crate) idx: u32, pub(crate) kind: DataIdEntryKind, } impl fmt::Display for DataId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "data_{}_{}", self.kind, self.idx) } } /// The data to be put in the data section of the asm #[derive(Default, Clone, Debug)] pub struct DataSection { pub non_configurables: Vec<Entry>, pub configurables: Vec<Entry>, pub(crate) pointer_id: FxHashMap<u64, DataId>, } impl DataSection { /// Get the number of entries pub fn num_entries(&self) -> usize { self.non_configurables.len() + self.configurables.len() } /// Iterate over all entries, non-configurables followed by configurables pub fn iter_all_entries(&self) -> impl Iterator<Item = Entry> + '_ { self.non_configurables .iter() .chain(self.configurables.iter()) .cloned() } /// Get the absolute index of an id fn absolute_idx(&self, id: &DataId) -> usize { match id.kind { DataIdEntryKind::NonConfigurable => id.idx as usize, DataIdEntryKind::Configurable => id.idx as usize + self.non_configurables.len(), } } /// Get entry at id fn get(&self, id: &DataId) -> Option<&Entry> { match id.kind { DataIdEntryKind::NonConfigurable => self.non_configurables.get(id.idx as usize), DataIdEntryKind::Configurable => self.configurables.get(id.idx as usize), } } /// Given a [DataId], calculate the offset _from the 
beginning of the data section_ to the data /// in bytes. pub(crate) fn data_id_to_offset(&self, id: &DataId) -> usize { let idx = self.absolute_idx(id); self.absolute_idx_to_offset(idx) } /// Given an absolute index, calculate the offset _from the beginning of the data section_ to the data /// in bytes. pub(crate) fn absolute_idx_to_offset(&self, idx: usize) -> usize { self.iter_all_entries().take(idx).fold(0, |offset, entry| { //entries must be word aligned size_bytes_round_up_to_word_alignment!(offset + entry.to_bytes().len()) }) } pub(crate) fn serialize_to_bytes(&self) -> Vec<u8> { // not the exact right capacity but serves as a lower bound let mut buf = Vec::with_capacity(self.num_entries()); for entry in self.iter_all_entries() { buf.append(&mut entry.to_bytes()); //entries must be word aligned let aligned_len = size_bytes_round_up_to_word_alignment!(buf.len()); buf.extend(vec![0u8; aligned_len - buf.len()]); } buf } /// Returns whether a specific [DataId] value has a copy type (fits in a register). pub(crate) fn has_copy_type(&self, id: &DataId) -> Option<bool> { self.get(id).map(|entry| entry.has_copy_type()) } /// Returns whether a specific [DataId] value is a byte entry. pub(crate) fn is_byte(&self, id: &DataId) -> Option<bool> { self.get(id).map(|entry| entry.is_byte()) } /// When generating code, sometimes a hard-coded data pointer is needed to reference /// static values that have a length longer than one word. /// This method appends pointers to the end of the data section (thus, not altering the data /// offsets of previous data). /// `pointer_value` is in _bytes_ and refers to the offset from instruction start or /// relative to the current (load) instruction. pub(crate) fn append_pointer(&mut self, pointer_value: u64) -> DataId { // The 'pointer' is just a literal 64 bit address. 
let data_id = self.insert_data_value(Entry::new_word( pointer_value, EntryName::NonConfigurable, None, )); self.pointer_id.insert(pointer_value, data_id.clone()); data_id } /// Get the [DataId] for a pointer, if it exists. /// The pointer must've been inserted with append_pointer. pub(crate) fn data_id_of_pointer(&self, pointer_value: u64) -> Option<DataId> { self.pointer_id.get(&pointer_value).cloned() } /// Given any data in the form of a [Literal] (using this type mainly because it includes type /// information and debug spans), insert it into the data section and return its handle as /// [DataId]. pub(crate) fn insert_data_value(&mut self, new_entry: Entry) -> DataId { // if there is an identical data value, use the same id let (value_pairs, kind) = match new_entry.name { EntryName::NonConfigurable => ( &mut self.non_configurables, DataIdEntryKind::NonConfigurable, ), EntryName::Configurable(_) => (&mut self.configurables, DataIdEntryKind::Configurable), }; match value_pairs.iter().position(|entry| entry.equiv(&new_entry)) { Some(num) => DataId { idx: num as u32, kind, }, None => { value_pairs.push(new_entry); // the index of the data section where the value is stored DataId { idx: (value_pairs.len() - 1) as u32, kind, } } } } // If the stored data is Datum::Word, return the inner value. 
pub(crate) fn get_data_word(&self, data_id: &DataId) -> Option<u64> { let value_pairs = match data_id.kind { DataIdEntryKind::NonConfigurable => &self.non_configurables, DataIdEntryKind::Configurable => &self.configurables, }; value_pairs.get(data_id.idx as usize).and_then(|entry| { if let Datum::Word(w) = entry.value { Some(w) } else { None } }) } } impl fmt::Display for DataSection { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn display_entry(datum: &Datum) -> String { match datum { Datum::Byte(w) => format!(".byte {w}"), Datum::Word(w) => format!(".word {w}"), Datum::ByteArray(bs) => display_bytes_for_data_section(bs, ".bytes"), Datum::Slice(bs) => display_bytes_for_data_section(bs, ".slice"), Datum::Collection(els) => format!( ".collection {{ {} }}", els.iter() .map(|el| display_entry(&el.value)) .collect::<Vec<_>>() .join(", ") ), } } use std::fmt::Write; let mut data_buf = String::new(); for (ix, entry) in self.iter_all_entries().enumerate() { writeln!( data_buf, "data_{}_{} {}", entry.name, ix, display_entry(&entry.value) )?; } write!(f, ".data:\n{data_buf}") } } fn display_bytes_for_data_section(bs: &Vec<u8>, prefix: &str) -> String { let mut hex_str = String::new(); let mut chr_str = String::new(); for b in bs { hex_str.push_str(format!("{b:02x} ").as_str()); chr_str.push(if *b == b' ' || b.is_ascii_graphic() { *b as char } else { '.' }); } format!("{prefix}[{}] {hex_str} {chr_str}", bs.len()) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/register_allocator.rs
sway-core/src/asm_generation/fuel/register_allocator.rs
use crate::{
    asm_generation::fuel::{analyses::liveness_analysis, compiler_constants},
    asm_lang::{
        allocated_ops::AllocatedRegister, virtual_register::*, AllocatedAbstractOp, Op,
        VirtualImmediate12, VirtualImmediate18, VirtualImmediate24, VirtualOp,
    },
};

use either::Either;
use indexmap::IndexMap;
use petgraph::{
    stable_graph::NodeIndex,
    visit::EdgeRef,
    Direction::{Incoming, Outgoing},
};
use rustc_hash::{FxHashMap, FxHashSet};
use std::cmp::Ordering;
use std::collections::{hash_map, BTreeSet, HashMap};
use sway_error::error::CompileError;
use sway_ir::size_bytes_round_up_to_word_alignment;
use sway_types::{FxIndexSet, Span};

use super::allocated_abstract_instruction_set::AllocatedAbstractInstructionSet;

// Each node in the interference graph represents a VirtualRegister.
// An edge from V1 -> V2 means that V2 was an open live range at
// the time V1 was defined. For spilling, incoming edges matter more
// as it indicates how big the range is, and thus is better to spill.
// An edge has a "bool" weight to indicate whether it was deleted
// during colouring. We don't actually delete the edge because that's
// required again during the actual assignment.
// (StableGraph is used so node indices stay valid across node removals.)
pub type InterferenceGraph =
    petgraph::stable_graph::StableGraph<VirtualRegister, bool, petgraph::Directed>;

// Initially, the bytecode will have a lot of individual registers being used. Each register will
// have a new unique identifier. For example, two separate invocations of `+` will result in 4
// registers being used for arguments and 2 for outputs.
//
// After that, the level 0 bytecode will go through a process where register use is minified,
// producing level 1 bytecode. This process is as such:
//
// 1. Detect the last time a register is read. After that, it can be reused and recycled to fit the
//    needs of the next "level 0 bytecode" register
//
// 2. Detect needless assignments and movements, and substitute registers in.
//    i.e.
//    a = b
//    c = a
//
//    would become
//    c = b
//
//
// After the level 1 bytecode is produced, level 2 bytecode is created by limiting the maximum
// number of registers and inserting bytecode to read from/write to memory where needed. Ideally,
// the algorithm for determining which registers will be written off to memory is based on how
// frequently that register is accessed in a particular section of code. Using this strategy, we
// hope to minimize memory writing.
//
// For each line, the number of times a virtual register is accessed between then and the end of the
// program is its register precedence. A virtual register's precedence is 0 if it is currently in
// "memory", and the above described number if it is not. This prevents over-prioritization of
// registers that have already been written off to memory.
//
/// The [SwayAsmSet] contains either a contract ABI and corresponding ASM, a script's main
/// function's ASM, or a predicate's main function's ASM. ASM is never generated for libraries,
/// as that happens when the library itself is imported.
// Tracks which virtual registers have been assigned to one allocatable register.
#[derive(Debug)]
struct RegisterAllocationStatus {
    reg: AllocatedRegister,
    used_by: BTreeSet<VirtualRegister>,
}

// The fixed pool of allocatable registers; each entry records the set of
// virtual registers currently mapped onto it.
#[derive(Debug)]
pub(crate) struct RegisterPool {
    registers: Vec<RegisterAllocationStatus>,
}

impl RegisterPool {
    // Build an empty pool with one slot per allocatable register.
    fn init() -> Self {
        let reg_pool: Vec<RegisterAllocationStatus> = (0
            // - 1 because we reserve the final register for the data_section begin
            ..compiler_constants::NUM_ALLOCATABLE_REGISTERS)
            .map(|x| RegisterAllocationStatus {
                reg: AllocatedRegister::Allocated(x),
                used_by: BTreeSet::new(),
            })
            .collect();
        Self {
            registers: reg_pool,
        }
    }

    // Linear scan over the pool for the allocated register (if any) that
    // `virtual_register` was assigned to.
    pub(crate) fn get_register(
        &self,
        virtual_register: &VirtualRegister,
    ) -> Option<AllocatedRegister> {
        let allocated_reg =
            self.registers
                .iter()
                .find(|RegisterAllocationStatus { reg: _, used_by }| {
                    used_by.contains(virtual_register)
                });
        allocated_reg.map(|RegisterAllocationStatus { reg, used_by: _ }| reg.clone())
    }
}

/// Given a list of instructions `ops` and a `live_out` table computed using the method
/// `liveness_analysis()`, create an interference graph (aka a "conflict" graph):
/// * Nodes = virtual registers
/// * Edges = overlapping live ranges
///
/// Two virtual registers interfere if there exists a point in the program where both are
/// simultaneously live. If `v1` and `v2` interfere, they cannot be allocated to the same register.
///
/// Algorithm:
/// ===============================================================================================
/// 1. create a graph node for every virtual register used.
/// 2. for a MOVE "v <= c" with live_out virtual registers b1, ... bn for v:
///    add edges (v, b_1), ..., (v, b_n) for any b_i different from c.
/// 3. for non-MOVE def of virtual register v with live_out virtual registers b_1, ..., b_n:
///    add edges (v, b_1), ..., (v, b_n)
///
/// ===============================================================================================
pub(crate) fn create_interference_graph(
    ops: &[Op],
    live_out: &[BTreeSet<VirtualRegister>],
) -> (InterferenceGraph, HashMap<VirtualRegister, NodeIndex>) {
    let mut interference_graph = InterferenceGraph::with_capacity(0, 0);

    // Figure out a mapping between a given VirtualRegister and its corresponding NodeIndex
    // in the interference graph.
    let mut reg_to_node_map: HashMap<VirtualRegister, NodeIndex> = HashMap::new();

    // Get all virtual registers used by the intermediate assembly and add them to the graph
    ops.iter()
        .fold(BTreeSet::new(), |mut tree, elem| {
            let mut regs = elem.registers();
            regs.retain(|&reg| reg.is_virtual());
            tree.extend(regs);
            tree
        })
        .iter()
        .for_each(|&reg| {
            reg_to_node_map.insert(reg.clone(), interference_graph.add_node(reg.clone()));
        });

    for (ix, regs) in live_out.iter().enumerate() {
        match &ops[ix].opcode {
            // For a MOVE "v <= c", `c` is deliberately excluded (per step 2 above),
            // which keeps v and c non-interfering so the MOVE can later be coalesced.
            Either::Left(VirtualOp::MOVE(v, c)) => {
                if let Some(ix1) = reg_to_node_map.get(v) {
                    for b in regs.iter() {
                        if let Some(ix2) = reg_to_node_map.get(b) {
                            // Add edge (v, b) if b != c
                            // Also, avoid adding self edges
                            if *b != *c && *b != *v {
                                interference_graph.update_edge(*ix1, *ix2, true);
                            }
                        }
                    }
                }
            }
            _ => {
                for v in &ops[ix].def_registers() {
                    if let Some(ix1) = reg_to_node_map.get(v) {
                        for b in regs.iter() {
                            if let Some(ix2) = reg_to_node_map.get(b) {
                                // Add edge (v, b)
                                // Avoid adding self edges
                                if *b != **v {
                                    interference_graph.update_edge(*ix1, *ix2, true);
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    (interference_graph, reg_to_node_map)
}

/// Given a list of instructions `ops` and a corresponding interference_graph, generate a new,
/// smaller list of instructions, where unnecessary MOVE instructions have been removed.
/// When an
/// unnecessary MOVE is detected and removed, the two virtual registers used by the MOVE are said
/// to be "coalesced" and the two corresponding nodes in the graph are then merged.
///
/// Two important aspects of this for our implementation:
/// * When two registers are coalesced, a new node with a new virtual register (generated using the
///   register sequencer) is created in the interference graph.
/// * When a MOVE instruction is removed, the offset of each subsequent instruction has to be
///   updated, as well as the immediate values for some or all jump instructions (`ji`, `jnei`, and
///   `jnzi for now).
pub(crate) fn coalesce_registers(
    ops: &[Op],
    live_out: Vec<BTreeSet<VirtualRegister>>,
    interference_graph: &mut InterferenceGraph,
    reg_to_node_map: &mut HashMap<VirtualRegister, NodeIndex>,
) -> (Vec<Op>, Vec<BTreeSet<VirtualRegister>>) {
    // A map from the virtual registers that are removed to the virtual registers that they are
    // replaced with during the coalescing process.
    let mut reg_to_reg_map = IndexMap::<&VirtualRegister, &VirtualRegister>::new();

    // To hold the final *reduced* list of ops
    let mut reduced_ops: Vec<Op> = Vec::with_capacity(ops.len());
    let mut reduced_live_out: Vec<BTreeSet<VirtualRegister>> = Vec::with_capacity(live_out.len());
    assert!(ops.len() == live_out.len());

    for (op_idx, op) in ops.iter().enumerate() {
        match &op.opcode {
            Either::Left(VirtualOp::MOVE(x, y)) => {
                match (x, y) {
                    (VirtualRegister::Virtual(_), VirtualRegister::Virtual(_)) => {
                        // Use reg_to_reg_map to figure out what x and y have been replaced
                        // with. We keep looking for mappings within reg_to_reg_map until we find a
                        // register that doesn't map to any other.
                        let mut r1 = x;
                        while let Some(t) = reg_to_reg_map.get(r1) {
                            r1 = t;
                        }
                        let mut r2 = y;
                        while let Some(t) = reg_to_reg_map.get(r2) {
                            r2 = t;
                        }

                        // Find the interference graph nodes that correspond to r1 and r2
                        let ix1 = reg_to_node_map.get(r1).unwrap();
                        let ix2 = reg_to_node_map.get(r2).unwrap();

                        // If r1 and r2 are the same, the MOVE instruction can be safely removed,
                        // i.e., not added to reduced_ops
                        if r1 == r2 {
                            continue;
                        }

                        let r1_neighbours = interference_graph
                            .neighbors_undirected(*ix1)
                            .collect::<FxIndexSet<_>>();
                        let r2_neighbours = interference_graph
                            .neighbors_undirected(*ix2)
                            .collect::<FxIndexSet<_>>();

                        // Using either of the two safety conditions below, it's guaranteed
                        // that we aren't turning a k-colourable graph into one that's not,
                        // by doing the coalescing. Ref: "Coalescing" section in Appel's book.
                        // Briggs: the merged node must have < k neighbours of significant degree.
                        let briggs_safety = r1_neighbours
                            .union(&r2_neighbours)
                            .filter(|&&neighbour| {
                                interference_graph.neighbors_undirected(neighbour).count()
                                    >= compiler_constants::NUM_ALLOCATABLE_REGISTERS as usize
                            })
                            .count()
                            < compiler_constants::NUM_ALLOCATABLE_REGISTERS as usize;

                        // George: every neighbour of r2 either already conflicts with r1 or is
                        // of insignificant degree.
                        let george_safety = r2_neighbours.iter().all(|&r2_neighbor| {
                            r1_neighbours.contains(&r2_neighbor)
                                || interference_graph.neighbors_undirected(r2_neighbor).count()
                                    < compiler_constants::NUM_ALLOCATABLE_REGISTERS as usize
                        });

                        let safe = briggs_safety || george_safety;

                        // If r1 and r2 are connected in the interference graph (i.e. their
                        // respective liveness ranges overlap), preserve the MOVE instruction by
                        // adding it to reduced_ops
                        if interference_graph.contains_edge(*ix1, *ix2)
                            || interference_graph.contains_edge(*ix2, *ix1)
                            || !safe
                        {
                            reduced_ops.push(op.clone());
                            reduced_live_out.push(live_out[op_idx].clone());
                            continue;
                        }

                        // The MOVE instruction can now be safely removed. That is, we simply don't
                        // add it to the reduced_ops vector. Also, we combine the two nodes ix1 and
                        // ix2 into ix2 and then we remove ix1 from the graph. We also have
                        // to do some bookkeeping.
                        //
                        // Note that because the interference graph is of type StableGraph, the
                        // node index corresponding to each virtual register does not change when
                        // some graph nodes are added or removed.

                        // Add all of ix1(r1)'s edges to `ix2(r2)` as incoming edges.
                        for neighbor in r1_neighbours {
                            if !interference_graph.contains_edge(*ix2, neighbor) {
                                interference_graph.update_edge(neighbor, *ix2, true);
                            }
                        }

                        // Remove ix1.
                        interference_graph.remove_node(*ix1);

                        // Update the register maps
                        reg_to_node_map.insert(r1.clone(), *ix2);
                        reg_to_reg_map.insert(r1, r2);
                    }
                    _ => {
                        // Preserve the MOVE instruction if either register used in the MOVE is
                        // special registers (i.e. *not* a VirtualRegister::Virtual(_))
                        reduced_ops.push(op.clone());
                        reduced_live_out.push(live_out[op_idx].clone());
                    }
                }
            }
            _ => {
                // Preserve all other instructions
                reduced_ops.push(op.clone());
                reduced_live_out.push(live_out[op_idx].clone());
            }
        }
    }

    // Create a *final* reg-to-reg map that We keep looking for mappings within reg_to_reg_map
    // until we find a register that doesn't map to any other.
    let mut final_reg_to_reg_map = IndexMap::<&VirtualRegister, &VirtualRegister>::new();
    for reg in reg_to_reg_map.keys() {
        let mut temp = reg;
        while let Some(t) = reg_to_reg_map.get(temp) {
            temp = t;
        }
        final_reg_to_reg_map.insert(reg, temp);
    }

    // Update the registers for all instructions using final_reg_to_reg_map
    for new_op in &mut reduced_ops {
        *new_op = new_op.update_register(&final_reg_to_reg_map);
    }
    // Keep the liveness table consistent with the renamed registers.
    for new_live_out in &mut reduced_live_out {
        for (old, &new) in &final_reg_to_reg_map {
            if new_live_out.remove(old) {
                new_live_out.insert(new.clone());
            }
        }
    }

    (reduced_ops, reduced_live_out)
}

// For every virtual register, compute its (def points, use points).
/// For every virtual register appearing in `ops`, collect the instruction indices at which it
/// is defined and at which it is used: register -> (def points, use points).
///
/// Only virtual registers are tracked; constant registers are never allocated, so they are
/// filtered out. Indices are pushed in increasing instruction order.
fn compute_def_use_points(ops: &[Op]) -> FxHashMap<VirtualRegister, (Vec<usize>, Vec<usize>)> {
    let mut res: FxHashMap<VirtualRegister, (Vec<usize>, Vec<usize>)> = FxHashMap::default();
    for (idx, op) in ops.iter().enumerate() {
        // Record use points. (The original code both `retain`ed and `filter`ed on
        // `is_virtual` — one pass suffices — and matched the map entry by hand where
        // `entry().or_default()` does the same thing.)
        for vreg in op.use_registers().into_iter().filter(|reg| reg.is_virtual()) {
            res.entry(vreg.clone()).or_default().1.push(idx);
        }
        // Record def points.
        for vreg in op.def_registers().into_iter().filter(|reg| reg.is_virtual()) {
            res.entry(vreg.clone()).or_default().0.push(idx);
        }
    }
    res
}

/// Given an interference graph and an integer k, figure out if the graph is k-colorable. Graph
/// coloring is an NP-complete problem, but the algorithm below is a simple stack based
/// approximation that relies on the fact that any node n in the graph that has fewer than k
/// neighbors can always be colored.
///
/// Algorithm:
/// ===============================================================================================
/// 1. Pick any node n such that degree(n) < k and put it on the stack.
/// 2. Remove node n and all its edges from the graph
///    - This may make some new nodes have fewer than k neighbours which is nice.
/// 3. If some vertex n still has k or more neighbors, then the graph may not be k colorable.
///    We still add it to the stack as is, as a potential spill. When popping, if we still
///    can't colour it, then it becomes an actual spill.
///
/// ===============================================================================================
///
pub(crate) fn color_interference_graph(
    interference_graph: &mut InterferenceGraph,
    ops: &[Op],
    live_out: &[BTreeSet<VirtualRegister>],
) -> Result<Vec<NodeIndex>, FxHashSet<VirtualRegister>> {
    let mut stack = Vec::with_capacity(interference_graph.node_count());
    let mut on_stack = FxHashSet::default();
    let mut spills = FxHashSet::default();
    let def_use_points = compute_def_use_points(ops);

    // Nodes with < k-degree before adding to the stack,
    // to have their neighbours processed.
    let mut worklist = vec![];
    // Nodes as yet having >= k-degree.
    let mut pending = FxHashSet::default();

    // Seed the worklist with trivially colourable nodes; everything else is pending.
    for node in interference_graph.node_indices() {
        let num_neighbors = interference_graph.neighbors_undirected(node).count();
        if num_neighbors < compiler_constants::NUM_ALLOCATABLE_REGISTERS as usize {
            worklist.push(node);
        } else {
            pending.insert(node);
        }
    }

    // Get outgoing "true" edged neighbors.
    fn get_connected_outgoing_neighbors(
        interference_graph: &InterferenceGraph,
        node_index: NodeIndex,
    ) -> impl Iterator<Item = NodeIndex> + '_ {
        interference_graph
            .edges_directed(node_index, Outgoing)
            .filter_map(|e| interference_graph[e.id()].then_some(e.target()))
    }

    // Get incoming "true" edged neighbors.
    fn get_connected_incoming_neighbors(
        interference_graph: &InterferenceGraph,
        node_index: NodeIndex,
    ) -> impl Iterator<Item = NodeIndex> + '_ {
        interference_graph
            .edges_directed(node_index, Incoming)
            .filter_map(|e| interference_graph[e.id()].then_some(e.source()))
    }

    // Get neighbours (either direction) connected via a "true" edge.
    fn get_connected_neighbours(
        interference_graph: &InterferenceGraph,
        node_index: NodeIndex,
    ) -> impl Iterator<Item = NodeIndex> + '_ {
        get_connected_outgoing_neighbors(interference_graph, node_index).chain(
            get_connected_incoming_neighbors(interference_graph, node_index),
        )
    }

    // Mark edges to/from node satisfying the conditions as deleted.
    // (Edges are only marked `false`, never removed — see the InterferenceGraph docs.)
    fn delete_edges<P: Fn(&VirtualRegister, &VirtualRegister) -> bool>(
        interference_graph: &mut InterferenceGraph,
        node_index: NodeIndex,
        should_delete: P,
    ) {
        let edges: Vec<_> = interference_graph
            .edges_directed(node_index, Outgoing)
            .chain(interference_graph.edges_directed(node_index, Incoming))
            .map(|edge| edge.id())
            .collect();
        for e in edges {
            let (source, target) = interference_graph.edge_endpoints(e).unwrap();
            {
                if should_delete(&interference_graph[source], &interference_graph[target]) {
                    interference_graph[e] = false;
                }
            }
        }
    }

    loop {
        while let Some(node_index) = worklist.pop() {
            // Ensure that we've not already processed this.
            if on_stack.contains(&node_index) {
                continue;
            }

            // This node is colourable.
            stack.push(node_index);
            on_stack.insert(node_index);

            // When spilled, not all edges should be deleted, and the spilling
            // code takes care of deleting the right edges.
            if !spills.contains(&interference_graph[node_index]) {
                // Delete all edges connected to node_index.
                delete_edges(interference_graph, node_index, |_, _| true)
            }

            // Removing this node's edges may have made some pending nodes colourable.
            let candidate_neighbors: Vec<_> = interference_graph
                .neighbors_undirected(node_index)
                .filter(|n| {
                    pending.contains(n)
                        && get_connected_neighbours(interference_graph, *n).count()
                            < compiler_constants::NUM_ALLOCATABLE_REGISTERS as usize
                })
                .collect();
            for candidate_neighbor in &candidate_neighbors {
                pending.remove(candidate_neighbor);
                worklist.push(*candidate_neighbor);
            }
        }

        // At the moment, our spill priority function is just this,
        // i.e., spill the register with more incoming interferences.
        // (roughly indicating how long the interval is).
        if let Some(spill_reg_index) = pending.iter().copied().max_by(|node1, node2| {
            let node1_priority =
                get_connected_incoming_neighbors(interference_graph, *node1).count();
            let node2_priority =
                get_connected_incoming_neighbors(interference_graph, *node2).count();
            match node1_priority.cmp(&node2_priority) {
                Ordering::Equal => {
                    // Equal priorities are broken deterministically and do not alter the spill heuristic.
                    let reg_cmp = interference_graph[*node1].cmp(&interference_graph[*node2]);
                    if reg_cmp == Ordering::Equal {
                        node1.index().cmp(&node2.index())
                    } else {
                        reg_cmp
                    }
                }
                other => other,
            }
        }) {
            let spill_reg = interference_graph[spill_reg_index].clone();
            spills.insert(spill_reg.clone());

            // Update the interference graph that this is spilled.
            // A spill implies a store right after a definition and
            // a load right before a use, forming new tiny live ranges.
            // So we retain only those interferences that correspond to
            // these tiny live ranges and remove the rest.
            let to_retain = def_use_points
                .get(&spill_reg)
                .map_or(FxHashSet::default(), |(defs, uses)| {
                    let mut retains = FxHashSet::default();
                    for &def in defs {
                        retains
                            .extend(live_out[def].iter().filter(|reg| !spills.contains(*reg)));
                    }
                    // A use at index 0 has no preceding instruction, hence the `> 0` filter.
                    for &r#use in uses.iter().filter(|&&r#use| r#use > 0) {
                        retains.extend(
                            live_out[r#use - 1]
                                .iter()
                                .filter(|reg| !spills.contains(*reg)),
                        );
                    }
                    retains
                });
            delete_edges(interference_graph, spill_reg_index, |source, target| {
                !(to_retain.contains(source) || to_retain.contains(target))
            });
            // The spilled node is now colourable in principle; process it like any other.
            pending.remove(&spill_reg_index);
            worklist.push(spill_reg_index);
        } else {
            break;
        }
    }

    if spills.is_empty() {
        Ok(stack)
    } else {
        Err(spills)
    }
}

/// Assigns an allocatable register to each virtual register used by some instruction in the
/// list `self.ops`. The algorithm used is Chaitin's graph-coloring register allocation
/// algorithm (https://en.wikipedia.org/wiki/Chaitin%27s_algorithm). The individual steps of
/// the algorithm are thoroughly explained in register_allocator.rs.
pub(crate) fn allocate_registers(
    ops: &[Op],
) -> Result<AllocatedAbstractInstructionSet, CompileError> {
    // Outcome of one colouring attempt: either a usable colouring, or a set of
    // registers that must be spilled before retrying.
    enum ColouringResult {
        Success {
            updated_ops: Vec<Op>,
            interference_graph: InterferenceGraph,
            colouring_stack: Vec<NodeIndex>,
        },
        SpillsNeeded {
            updated_ops: Vec<Op>,
            spills: FxHashSet<VirtualRegister>,
        },
    }

    // One full analyse/build/coalesce/colour pass over `ops`.
    fn try_color(ops: &[Op]) -> ColouringResult {
        // Step 1: Liveness Analysis.
        let live_out = liveness_analysis(ops, true);

        // Step 2: Construct the interference graph.
        let (mut interference_graph, mut reg_to_node_ix) =
            create_interference_graph(ops, &live_out);

        // Step 3: Remove redundant MOVE instructions using the interference graph.
        let (updated_ops, live_out) =
            coalesce_registers(ops, live_out, &mut interference_graph, &mut reg_to_node_ix);

        // Step 4: Simplify - i.e. color the interference graph and return a stack that contains
        // each colorable node and its neighbors.
        match color_interference_graph(&mut interference_graph, &updated_ops, &live_out) {
            Ok(colouring_stack) => ColouringResult::Success {
                updated_ops,
                interference_graph,
                colouring_stack,
            },
            Err(spills) => ColouringResult::SpillsNeeded {
                updated_ops,
                spills,
            },
        }
    }

    // We start with the ops we're given.
    let mut updated_ops_ref = ops;
    // A placeholder for updated ops.
    let mut updated_ops;
    // How many times to try spilling before we give up.
    let mut try_count = 0;
    // Try and assign registers. If we fail, spill. Repeat few times.
    let (updated_ops, interference_graph, mut stack) = loop {
        match try_color(updated_ops_ref) {
            ColouringResult::Success {
                updated_ops,
                interference_graph,
                colouring_stack,
            } => {
                break (updated_ops, interference_graph, colouring_stack);
            }
            ColouringResult::SpillsNeeded {
                updated_ops: updated_ops_before_spill,
                spills,
            } => {
                if try_count >= 4 {
                    // Best-effort: use the first label's comment to name the function
                    // in the error message.
                    let comment = updated_ops_before_spill
                        .iter()
                        .find_map(|op| {
                            if let Either::Right(crate::asm_lang::ControlFlowOp::Label(_)) =
                                op.opcode
                            {
                                Some(op.comment.clone())
                            } else {
                                None
                            }
                        })
                        .unwrap_or("unknown".into());

                    return Err(CompileError::InternalOwned(
                        format!(
                            "The allocator cannot resolve a register mapping for function {comment}. \
                            Using #[inline(never)] on some functions may help."
                        ),
                        Span::dummy(),
                    ));
                }
                try_count += 1;
                updated_ops = spill(&updated_ops_before_spill, &spills);
                updated_ops_ref = &updated_ops;
            }
        }
    };

    // Step 5: Use the stack to assign a register for each virtual register.
    let pool = assign_registers(&interference_graph, &mut stack)?;

    // Step 6: Update all instructions to use the resulting register pool.
    let mut buf = vec![];
    for op in &updated_ops {
        buf.push(AllocatedAbstractOp {
            opcode: op.allocate_registers(&pool),
            comment: op.comment.clone(),
            owning_span: op.owning_span.clone(),
        })
    }
    Ok(AllocatedAbstractInstructionSet { ops: buf })
}

/// Use the stack generated by the coloring algorithm to figure out a register assignment for each
/// virtual register. The idea here is to successively pop the stack while selecting a register to
/// each virtual register. A register r is available to a virtual register v if the intersection of
/// the neighbors of v (available from the stack) and the list of virtual registers already used by
/// r (available in the used_by field) is empty.
///
fn assign_registers(
    interference_graph: &InterferenceGraph,
    stack: &mut Vec<NodeIndex>,
) -> Result<RegisterPool, CompileError> {
    let mut pool = RegisterPool::init();
    while let Some(node) = stack.pop() {
        let reg = interference_graph[node].clone();
        let neighbors: BTreeSet<VirtualRegister> = interference_graph
            .neighbors_undirected(node)
            .map(|neighbor| interference_graph[neighbor].clone())
            .collect();
        if reg.is_virtual() {
            // First pool slot whose current occupants don't interfere with `reg`.
            let available =
                pool.registers
                    .iter_mut()
                    .find(|RegisterAllocationStatus { reg: _, used_by }| {
                        neighbors.intersection(used_by).count() == 0
                    });

            if let Some(RegisterAllocationStatus { reg: _, used_by }) = available {
                used_by.insert(reg.clone());
            } else {
                return Err(CompileError::Internal(
                    "The allocator cannot resolve a register mapping for this program. \
                    Using #[inline(never)] on some functions may help.",
                    Span::dummy(),
                ));
            }
        }
    }

    Ok(pool)
}

/// Given a function, its locals info (stack frame usage details)
/// and a set of virtual registers to be spilled, insert the actual spills
/// and return the updated function and the updated stack info.
fn spill(ops: &[Op], spills: &FxHashSet<VirtualRegister>) -> Vec<Op> {
    let mut spilled: Vec<Op> = vec![];

    // Attempt to discover the current stack size and base register.
    let mut cfe_idx_opt = None;
    let mut cfs_idx_opt = None;
    for (op_idx, op) in ops.iter().enumerate() {
        match &op.opcode {
            Either::Left(VirtualOp::CFEI(..)) => {
                assert!(cfe_idx_opt.is_none(), "Found more than one stack extension");
                cfe_idx_opt = Some(op_idx);
            }
            Either::Left(VirtualOp::CFSI(..)) => {
                assert!(cfs_idx_opt.is_none(), "Found more than one stack shrink");
                cfs_idx_opt = Some(op_idx);
            }
            _ => (),
        }
    }
    let cfe_idx = cfe_idx_opt.expect("Function does not have CFEI instruction for locals");

    let Either::Left(VirtualOp::CFEI(
        VirtualRegister::Constant(ConstantRegister::StackPointer),
        virt_imm_24,
    )) = &ops[cfe_idx].opcode
    else {
        panic!("Unexpected opcode");
    };
    let locals_size_bytes = virt_imm_24.value();

    // pad up the locals size in bytes to a word.
    let locals_size_bytes = size_bytes_round_up_to_word_alignment!(locals_size_bytes);

    // Determine the stack slots for each spilled register.
    let spill_offsets_bytes = spill_offsets(spills, locals_size_bytes);

    // One word (8 bytes) of stack per spilled register.
    let spills_size = (8 * spills.len()) as u32;
    let new_locals_byte_size = locals_size_bytes + spills_size;
    if new_locals_byte_size > compiler_constants::TWENTY_FOUR_BITS as u32 {
        panic!("Enormous stack usage for locals.");
    }

    for (op_idx, op) in ops.iter().enumerate() {
        if op_idx == cfe_idx {
            // This is the CFE instruction, use the new stack size.
            spilled.push(Op {
                opcode: Either::Left(VirtualOp::CFEI(
                // NOTE(review): the source snapshot is truncated here ("truncated: true");
                // the remainder of `spill` is not visible in this chunk.
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/functions.rs
sway-core/src/asm_generation/fuel/functions.rs
use crate::{
    asm_generation::{
        from_ir::*,
        fuel::{
            compiler_constants::{self, TWELVE_BITS},
            data_section::Entry,
            fuel_asm_builder::FuelAsmBuilder,
        },
        ProgramKind,
    },
    asm_lang::{
        virtual_register::{self, *},
        JumpType, Op, OrganizationalOp, VirtualImmediate12, VirtualImmediate18,
        VirtualImmediate24, VirtualOp,
    },
    decl_engine::DeclRef,
    fuel_prelude::fuel_asm::GTFArgs,
};

use sway_ir::*;

use either::Either;
use sway_error::{
    error::CompileError,
    handler::{ErrorEmitted, Handler},
};
use sway_types::{Ident, Span};

use super::{compiler_constants::NUM_ARG_REGISTERS, data_section::EntryName};

/// A summary of the adopted calling convention:
///
/// - Function arguments are passed left to right in the reserved registers. Extra args are passed
///   on the stack.
/// - The return value is returned in $retv.
/// - The return address is passed in $reta.
/// - All other general purpose registers must be preserved.
///
/// If the return value has a copy-type it can be returned in $retv directly. If the return
/// value is a ref-type its space must be allocated by the caller and its address passed into
/// (and out of) the callee using $retv.
///
/// The general process for a call is therefore the following. Not all steps are necessary,
/// depending on how many args and local variables the callee has, and whether the callee makes
/// its own calls.
///
/// - Caller:
///   - Place function args into $rarg0 - $rargN and if necessary the stack.
///   - Allocate the return value on the stack if it's a reference type.
///   - Place the return address into $reta
///   - Jump to function address.
///   - If necessary restore the stack to free args.
/// - Callee:
///   - Save general purpose registers to the stack.
///   - Save the args registers, return value pointer and return address.
///   - Save room on the stack for locals.
///   - (Do work.)
///   - Put the result in return value.
///   - Restore the stack to free locals.
///   - Restore the return address.
///   - Restore the general purpose registers from the stack.
///   - Jump to the return address.
///
/// When a function has more than NUM_ARG_REGISTERS, the last arg register
/// is used to point to the stack location of the remaining arguments.
/// Stack space for the extra arguments is allocated in the caller when
/// locals of the caller are allocated.
impl FuelAsmBuilder<'_, '_> {
    // Emit the caller side of a call: marshal `args` into the argument registers
    // (and, if needed, the stack), jump to `function`, and capture the return value.
    pub(super) fn compile_call(
        &mut self,
        instr_val: &Value,
        function: &Function,
        args: &[Value],
    ) -> Result<(), CompileError> {
        // Put the args into the args registers.
        if args.len() <= compiler_constants::NUM_ARG_REGISTERS as usize {
            // Simple case: every argument fits in a register.
            for (idx, arg_val) in args.iter().enumerate() {
                let arg_reg = self.value_to_register(arg_val)?;
                self.cur_bytecode.push(Op::register_move(
                    VirtualRegister::Constant(ConstantRegister::ARG_REGS[idx]),
                    arg_reg,
                    format!("[call]: pass argument {idx}"),
                    self.md_mgr.val_to_span(self.context, *arg_val),
                ));
            }
        } else {
            // Register ARG_REGS[NUM_ARG_REGISTERS-1] must contain LocalsBase + locals_size
            // so that the callee can index the stack arguments from there.
            // It's also useful for us to save the arguments to the stack next.
            if self.locals_size_bytes() <= TWELVE_BITS {
                // Fits in a 12-bit immediate: one ADDI does the job.
                self.cur_bytecode.push(Op {
                    opcode: Either::Left(VirtualOp::ADDI(
                        VirtualRegister::Constant(
                            ConstantRegister::ARG_REGS
                                [(compiler_constants::NUM_ARG_REGISTERS - 1) as usize],
                        ),
                        VirtualRegister::Constant(ConstantRegister::LocalsBase),
                        VirtualImmediate12::try_new(self.locals_size_bytes(), Span::dummy())
                            .expect("Stack size too big for these many arguments, cannot handle."),
                    )),
                    comment: "[call]: save address of stack arguments in last argument register"
                        .to_string(),
                    owning_span: self.md_mgr.val_to_span(self.context, *instr_val),
                });
            } else {
                // Too big for ADDI's immediate: materialise via MOVI (18-bit) then ADD.
                self.cur_bytecode.push(Op {
                    opcode: Either::Left(VirtualOp::MOVI(
                        VirtualRegister::Constant(
                            ConstantRegister::ARG_REGS
                                [(compiler_constants::NUM_ARG_REGISTERS - 1) as usize],
                        ),
                        VirtualImmediate18::try_new(self.locals_size_bytes(), Span::dummy())
                            .expect("Stack size too big for these many arguments, cannot handle."),
                    )),
                    comment: "[call]: temporarily save locals size to add up next".to_string(),
                    owning_span: self.md_mgr.val_to_span(self.context, *instr_val),
                });
                self.cur_bytecode.push(Op {
                    opcode: Either::Left(VirtualOp::ADD(
                        VirtualRegister::Constant(
                            ConstantRegister::ARG_REGS
                                [(compiler_constants::NUM_ARG_REGISTERS - 1) as usize],
                        ),
                        VirtualRegister::Constant(ConstantRegister::LocalsBase),
                        VirtualRegister::Constant(
                            ConstantRegister::ARG_REGS
                                [(compiler_constants::NUM_ARG_REGISTERS - 1) as usize],
                        ),
                    )),
                    comment: "[call]: save address of stack arguments in last argument register"
                        .to_string(),
                    owning_span: self.md_mgr.val_to_span(self.context, *instr_val),
                });
            }

            // Put NUM_ARG_REGISTERS - 1 arguments into arg registers and rest into the stack.
            for (idx, arg_val) in args.iter().enumerate() {
                let arg_reg = self.value_to_register(arg_val)?;
                // Except for the last arg register, the others hold an argument.
                if idx < compiler_constants::NUM_ARG_REGISTERS as usize - 1 {
                    self.cur_bytecode.push(Op::register_move(
                        VirtualRegister::Constant(ConstantRegister::ARG_REGS[idx]),
                        arg_reg,
                        format!("[call]: pass argument {idx}"),
                        self.md_mgr.val_to_span(self.context, *arg_val),
                    ));
                } else {
                    // All arguments [NUM_ARG_REGISTERS - 1 ..] go into the stack.
                    assert!(
                        self.locals_size_bytes().is_multiple_of(8),
                        "The size of locals is not word aligned"
                    );
                    let stack_offset =
                        (idx as u64 + 1) - compiler_constants::NUM_ARG_REGISTERS as u64;
                    let stack_offset_bytes = self.locals_size_bytes() + (stack_offset * 8);
                    assert!(
                        stack_offset_bytes
                            < self.locals_size_bytes() + (self.max_num_extra_args() * 8)
                    );
                    // SW's immediate is a word offset from the base in the last arg register.
                    self.cur_bytecode.push(Op {
                        opcode: Either::Left(VirtualOp::SW(
                            VirtualRegister::Constant(
                                ConstantRegister::ARG_REGS
                                    [compiler_constants::NUM_ARG_REGISTERS as usize - 1],
                            ),
                            arg_reg,
                            VirtualImmediate12::try_new(
                                stack_offset,
                                self.md_mgr
                                    .val_to_span(self.context, *arg_val)
                                    .unwrap_or(Span::dummy()),
                            )
                            .expect("Too many arguments, cannot handle."),
                        )),
                        comment: format!("[call]: pass argument {idx} via its stack slot"),
                        owning_span: self.md_mgr.val_to_span(self.context, *arg_val),
                    });
                }
            }
        }

        // Jump to function and insert return label.
        let (fn_label, _) = self.func_to_labels(function);
        self.cur_bytecode.push(Op {
            opcode: Either::Right(OrganizationalOp::Jump {
                to: fn_label,
                type_: JumpType::Call,
            }),
            comment: format!("[call]: call {}", function.get_name(self.context)),
            owning_span: None,
        });

        // Save the return value, if it is not of type unit.
        let ret_reg = self.reg_seqr.next();
        if !function.get_return_type(self.context).is_unit(self.context) {
            self.cur_bytecode.push(Op {
                opcode: Either::Left(VirtualOp::MOVE(
                    ret_reg.clone(),
                    VirtualRegister::Constant(ConstantRegister::CallReturnValue),
                )),
                comment: "[call]: copy the return value".into(),
                owning_span: None,
            });
        } else {
            // Unit-returning calls still get a register, pinned to zero.
            self.cur_bytecode.push(Op {
                opcode: Either::Left(VirtualOp::MOVE(
                    ret_reg.clone(),
                    VirtualRegister::Constant(ConstantRegister::Zero),
                )),
                comment: "[call]: return unit value".into(),
                owning_span: None,
            });
        }
        self.reg_map.insert(*instr_val, ret_reg);
        Ok(())
    }

    // Emit the callee side of a return: move the result into $retv and jump to
    // the function's end label (saved in `return_ctxs` by the call machinery).
    pub(super) fn compile_ret_from_call(
        &mut self,
        instr_val: &Value,
        ret_val: &Value,
    ) -> Result<(), CompileError> {
        // Move the result (if there is one) into the return value register.
        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
        if !ret_val
            .get_type(self.context)
            .is_some_and(|t| t.is_unit(self.context))
        {
            let ret_reg = self.value_to_register(ret_val)?;
            self.cur_bytecode.push(Op::register_move(
                VirtualRegister::Constant(ConstantRegister::CallReturnValue),
                ret_reg,
                "set return value",
                owning_span,
            ));
        }

        // Jump to the end of the function.
        let end_label = self
            .return_ctxs
            .last()
            .expect("Calls guaranteed to save return context.")
            .0;
        self.cur_bytecode.push(Op::jump_to_label(end_label));
        Ok(())
    }

    // Compile one IR function into `self.cur_bytecode`, then move the result into
    // `self.entries` (for entry functions) or `self.non_entries`.
    pub fn compile_function(
        &mut self,
        handler: &Handler,
        function: Function,
    ) -> Result<(), ErrorEmitted> {
        assert!(
            self.cur_bytecode.is_empty(),
            "can't do nested functions yet"
        );

        if function.has_selector(self.context) {
            // Add a comment noting that this is a named contract method.
            self.cur_bytecode.push(Op::new_comment(format!(
                "contract method: {}, selector: 0x{}",
                function.get_name(self.context),
                function
                    .get_selector(self.context)
                    .unwrap()
                    .into_iter()
                    .fold("".to_string(), |output, b| { format!("{output}{b:02x}") })
            )));
        }

        let func_is_entry = function.is_entry(self.context);

        // Insert a function label.
        let (start_label, end_label) = self.func_to_labels(&function);
        let md = function.get_metadata(self.context);
        let span = self.md_mgr.md_to_span(self.context, md);
        let test_decl_index = self.md_mgr.md_to_test_decl_index(self.context, md);
        // Only functions carrying both a span and a test decl index get a test DeclRef.
        let test_decl_ref = match (&span, &test_decl_index) {
            (Some(span), Some(decl_index)) => Some(DeclRef::new(
                Ident::new(span.clone()),
                *decl_index,
                span.clone(),
            )),
            _ => None,
        };
        let comment = format!(
            "--- start of function: {} ---",
            function.get_name(self.context)
        );
        self.cur_bytecode.push(match &span {
            Some(span) => Op::jump_label_comment(start_label, span.clone(), comment),
            None => Op::unowned_jump_label_comment(start_label, comment),
        });

        // Manage the call frame.
        if !func_is_entry {
            // Save any general purpose registers used here on the stack.
            self.cur_bytecode.push(Op {
                opcode: Either::Right(OrganizationalOp::PushAll(start_label)),
                comment: "save all registers".to_owned(),
                owning_span: span.clone(),
            });
        }

        let locals_alloc_result = self.alloc_locals(function);

        if func_is_entry {
            self.compile_external_args(function)
                .map_err(|e| handler.emit_err(e))?
        } else {
            // Make copies of the arg registers.
            self.compile_fn_call_args(function)
        }

        let reta = self.reg_seqr.next(); // XXX only do this if this function makes calls
        if !func_is_entry {
            // Save $reta and $retv
            self.cur_bytecode.push(Op::register_move(
                reta.clone(),
                VirtualRegister::Constant(ConstantRegister::CallReturnAddress),
                "save return address",
                None,
            ));
            let retv = self.reg_seqr.next();
            self.cur_bytecode.push(Op::register_move(
                retv.clone(),
                VirtualRegister::Constant(ConstantRegister::CallReturnValue),
                "save return value",
                None,
            ));

            // Store some info describing the call frame.
            self.return_ctxs.push((end_label, retv));
        }

        self.init_locals(locals_alloc_result);

        // Compile instructions. Traverse the IR blocks in reverse post order. This guarantees that
        // each block is processed after all its CFG predecessors have been processed.
        let po = sway_ir::dominator::compute_post_order(self.context, &function);
        for block in po.po_to_block.iter().rev() {
            let label = self.block_to_label(block);
            self.cur_bytecode.push(Op::unowned_jump_label(label));
            self.compile_block(handler, block, func_is_entry)?;
        }

        if !func_is_entry {
            // Insert the end of function label.
            self.cur_bytecode.push(Op::unowned_jump_label(end_label));

            // Pop the call frame entry.
            self.return_ctxs.pop();

            // Free our stack allocated locals. This is unneeded for entries since they will have
            // actually returned to the calling context via a VM RET.
            self.drop_locals();

            // Restore $reta.
            self.cur_bytecode.push(Op::register_move(
                VirtualRegister::Constant(ConstantRegister::CallReturnAddress),
                reta,
                "restore return address",
                None,
            ));

            // Restore GP regs.
            self.cur_bytecode.push(Op {
                opcode: Either::Right(OrganizationalOp::PopAll(start_label)),
                comment: "restore all registers".to_owned(),
                owning_span: None,
            });

            // Jump to the return address.
            self.cur_bytecode.push(Op {
                opcode: Either::Left(VirtualOp::JAL(
                    ConstantRegister::Zero.into(),
                    ConstantRegister::CallReturnAddress.into(),
                    VirtualImmediate12::new(0),
                )),
                comment: "return from call".into(),
                owning_span: None,
            });
        }

        // Save this function.
        let mut ops = Vec::new();
        ops.append(&mut self.cur_bytecode);
        if func_is_entry {
            self.entries
                .push((function, start_label, ops, test_decl_ref));
        } else {
            self.non_entries.push(ops);
        }

        Ok(())
    }

    // Copy incoming argument registers (or stack slots) into fresh virtual
    // registers so the fixed arg registers stay free for our own calls.
    fn compile_fn_call_args(&mut self, function: Function) {
        if function.num_args(self.context) <= compiler_constants::NUM_ARG_REGISTERS as usize {
            // All arguments are passed through registers.
            for (idx, (arg_name, arg_val)) in function.args_iter(self.context).enumerate() {
                // Make a copy of the args in case we make calls and need to use the arg registers.
let arg_copy_reg = self.reg_seqr.next(); self.cur_bytecode.push(Op::register_move( arg_copy_reg.clone(), VirtualRegister::Constant(ConstantRegister::ARG_REGS[idx]), format!("save argument {idx} ({arg_name})"), self.md_mgr.val_to_span(self.context, *arg_val), )); // Remember our arg copy. self.reg_map.insert(*arg_val, arg_copy_reg); } } else { // Get NUM_ARG_REGISTERS - 1 arguments from arg registers and rest from the stack. for (idx, (arg_name, arg_val)) in function.args_iter(self.context).enumerate() { let arg_copy_reg = self.reg_seqr.next(); // Except for the last arg register, the others hold an argument. if idx < compiler_constants::NUM_ARG_REGISTERS as usize - 1 { // Make a copy of the args in case we make calls and need to use the arg registers. self.cur_bytecode.push(Op::register_move( arg_copy_reg.clone(), VirtualRegister::Constant(ConstantRegister::ARG_REGS[idx]), format!("save argument {idx} ({arg_name})"), self.md_mgr.val_to_span(self.context, *arg_val), )); } else { // All arguments [NUM_ARG_REGISTERS - 1 ..] go into the stack. assert!( self.locals_size_bytes().is_multiple_of(8), "The size of locals is not word aligned" ); let stack_offset = (idx as u64 + 1) - compiler_constants::NUM_ARG_REGISTERS as u64; self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::LW( arg_copy_reg.clone(), VirtualRegister::Constant( ConstantRegister::ARG_REGS [compiler_constants::NUM_ARG_REGISTERS as usize - 1], ), VirtualImmediate12::try_new( stack_offset, self.md_mgr .val_to_span(self.context, *arg_val) .unwrap_or(Span::dummy()), ) .expect("Too many arguments, cannot handle."), )), comment: format!("load argument {idx} ({arg_name}) from its stack slot"), owning_span: self.md_mgr.val_to_span(self.context, *arg_val), }); } // Remember our arg copy. 
self.reg_map.insert(*arg_val, arg_copy_reg); } } } // Handle loading the arguments of a contract call fn compile_external_args(&mut self, function: Function) -> Result<(), CompileError> { match function.args_iter(self.context).count() { // Nothing to do if there are no arguments 0 => Ok(()), // A special case for when there's only a single arg, its value (or address) is placed // directly in the base register. 1 => { let (_, val) = function.args_iter(self.context).next().unwrap(); let single_arg_reg = self.reg_seqr.next(); match self.program_kind { ProgramKind::Contract => { self.read_args_base_from_frame(&single_arg_reg); } ProgramKind::Library => {} // Nothing to do here ProgramKind::Script | ProgramKind::Predicate => { if let ProgramKind::Predicate = self.program_kind { self.read_args_base_from_predicate_data(&single_arg_reg); } else { self.read_args_base_from_script_data(&single_arg_reg); } // The base is an offset. Dereference it. // XXX val.get_type() should be a pointer if it's not meant to be loaded. if val .get_type(self.context) .is_some_and(|t| self.is_copy_type(&t)) { self.cur_bytecode.push(Op { opcode: either::Either::Left(VirtualOp::LW( single_arg_reg.clone(), single_arg_reg.clone(), VirtualImmediate12::new(0), )), comment: "load main function parameter".into(), owning_span: None, }); } } } self.reg_map.insert(*val, single_arg_reg); Ok(()) } // Otherwise, the args are bundled together and pointed to by the base register. _ => { let args_base_reg = self.reg_seqr.next(); match self.program_kind { ProgramKind::Contract => self.read_args_base_from_frame(&args_base_reg), ProgramKind::Library => return Ok(()), // Nothing to do here ProgramKind::Predicate => { self.read_args_base_from_predicate_data(&args_base_reg) } ProgramKind::Script => self.read_args_base_from_script_data(&args_base_reg), } // Successively load each argument. The asm generated depends on the arg type size // and whether the offset fits in a 12-bit immediate. 
let mut arg_word_offset = 0; for (name, val) in function.args_iter(self.context) { let current_arg_reg = self.reg_seqr.next(); // The function arg type might be a pointer, but the value in the struct will // be of the pointed to type. So strip the pointer if necessary. let arg_type = val .get_type(self.context) .map(|ty| ty.get_pointee_type(self.context).unwrap_or(ty)) .unwrap(); let arg_type_size = arg_type.size(self.context); if self.is_copy_type(&arg_type) { if arg_word_offset > compiler_constants::TWELVE_BITS { let offs_reg = self.reg_seqr.next(); self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::ADD( args_base_reg.clone(), args_base_reg.clone(), offs_reg.clone(), )), comment: format!("get offset of argument {name}"), owning_span: None, }); if arg_type_size.in_bytes() == 1 { self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::LB( current_arg_reg.clone(), offs_reg, VirtualImmediate12::new(0), )), comment: format!("get argument {name}"), owning_span: None, }); } else { self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::LW( current_arg_reg.clone(), offs_reg, VirtualImmediate12::new(0), )), comment: format!("get argument {name}"), owning_span: None, }); } } else if arg_type_size.in_bytes() == 1 { self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::LB( current_arg_reg.clone(), args_base_reg.clone(), VirtualImmediate12::new(arg_word_offset * 8), )), comment: format!("get argument {name}"), owning_span: None, }); } else { self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::LW( current_arg_reg.clone(), args_base_reg.clone(), VirtualImmediate12::new(arg_word_offset), )), comment: format!("get argument {name}"), owning_span: None, }); } } else { self.immediate_to_reg( arg_word_offset * 8, current_arg_reg.clone(), Some(&args_base_reg), format!("get offset of argument {name}"), None, ); } arg_word_offset += arg_type_size.in_words(); self.reg_map.insert(*val, current_arg_reg); } Ok(()) } } } // Read the argument(s) base from the 
call frame. fn read_args_base_from_frame(&mut self, reg: &VirtualRegister) { self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::LW( reg.clone(), VirtualRegister::Constant(ConstantRegister::FramePointer), // see https://github.com/FuelLabs/fuel-specs/pull/193#issuecomment-876496372 VirtualImmediate12::new(74), )), comment: "get base register for method arguments".into(), owning_span: None, }); } // Read the argument(s) base from the script data. fn read_args_base_from_script_data(&mut self, reg: &VirtualRegister) { self.cur_bytecode.push(Op { opcode: either::Either::Left(VirtualOp::GTF( reg.clone(), VirtualRegister::Constant(ConstantRegister::Zero), VirtualImmediate12::new(GTFArgs::ScriptData as u64), )), comment: "get base register for main function arguments".into(), owning_span: None, }); } /// Read the returns the base pointer for predicate data fn read_args_base_from_predicate_data(&mut self, base_reg: &VirtualRegister) { // Final label to jump to continue execution, once the predicate data pointer is // successfully found let success_label = self.reg_seqr.get_label(); // Use the `gm` instruction to get the index of the predicate. This is the index we're // going to use in the subsequent `gtf` instructions. let input_index = self.reg_seqr.next(); self.cur_bytecode.push(Op { opcode: either::Either::Left(VirtualOp::GM( input_index.clone(), VirtualImmediate18::new(3), )), comment: "get predicate index".into(), owning_span: None, }); // Find the type of the "Input" using `GTF`. The returned value is one of three possible // ones: // 0 -> Input Coin = 0, // 1 -> Input Contract, // 2 -> Input Message // We only care about input coins and input message. 
let input_type = self.reg_seqr.next(); self.cur_bytecode.push(Op { opcode: either::Either::Left(VirtualOp::GTF( input_type.clone(), input_index.clone(), VirtualImmediate12::new(GTFArgs::InputType as u64), )), comment: "get predicate input type".into(), owning_span: None, }); // Label to jump to if the input type is *not* zero, i.e. not "coin". Then do the jump. let input_type_not_coin_label = self.reg_seqr.get_label(); self.cur_bytecode.push(Op::jump_if_not_zero( input_type.clone(), input_type_not_coin_label, )); // If the input is indeed a "coin", then use `GTF` to get the "input coin predicate data // pointer" and store in the `base_reg` self.cur_bytecode.push(Op { opcode: either::Either::Left(VirtualOp::GTF( base_reg.clone(), input_index.clone(), VirtualImmediate12::new(GTFArgs::InputCoinPredicateData as u64), )), comment: "get predicate input coin data pointer".into(), owning_span: None, }); // Now that we have the actual pointer, we can jump to the success label to continue // execution. self.cur_bytecode.push(Op::jump_to_label(success_label)); // Otherwise, insert the label to jump to if the input type is not a "coin". self.cur_bytecode .push(Op::unowned_jump_label(input_type_not_coin_label)); // Check if the input type is "message" by comparing the input type to a register // containing 2. 
let input_type_is_message = self.reg_seqr.next(); let two = self.reg_seqr.next(); self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::MOVI(two.clone(), VirtualImmediate18::new(2))), comment: "[predicate input is message]: set register to 2 (Input::Message discriminator)" .into(), owning_span: None, }); self.cur_bytecode.push(Op { opcode: either::Either::Left(VirtualOp::EQ( input_type_is_message.clone(), input_type, two, )), comment: "[predicate input is message]: check if input type is message".into(), owning_span: None, }); // Invert `input_type_is_message` to use in `jnzi` let input_type_not_message = self.reg_seqr.next(); self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::XORI( input_type_not_message.clone(), input_type_is_message, VirtualImmediate12::new(1), )), comment: "[predicate input is message]: check if input type is not message".into(), owning_span: None, }); // Label to jump to if the input type is *not* 2, i.e. not "message" (and not "coin" since // we checked that earlier). Then do the jump. let input_type_not_message_label = self.reg_seqr.get_label(); self.cur_bytecode.push(Op::jump_if_not_zero_comment( input_type_not_message, input_type_not_message_label,
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/checks.rs
sway-core/src/asm_generation/fuel/checks.rs
//! Various checks and heuristics that are naively run on sequences of opcodes. //! //! This is _not_ the place for optimization passes. use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, }; use sway_types::Span; use crate::asm_lang::allocated_ops::{AllocatedInstruction, AllocatedOp}; /// Checks if an opcode is one that cannot be executed from within a script. /// If so, throw an error. /// One example of disallowed code is as follows: /// ```ignore /// pub fn burn(sub_id: SubId, amount: u64) { /// asm(r1: amount, r2: sub_id) { /// burn r1 r2; /// } /// } /// ``` pub(crate) fn check_script_opcodes( handler: &Handler, ops: &[AllocatedOp], ) -> Result<(), ErrorEmitted> { use AllocatedInstruction::*; // Abort compilation because the finalized asm contains opcodes invalid to a script. // Preemptively avoids the creation of scripts with opcodes not allowed at runtime. handler.scope(|handler| { for op in ops { match &op.opcode { GM(_, imm) if (1..=2).contains(&imm.value()) => { handler.emit_err(CompileError::GMFromExternalContext { span: get_op_span(op), }); } MINT(..) => { handler.emit_err(CompileError::MintFromExternalContext { span: get_op_span(op), }); } BURN(..) => { handler.emit_err(CompileError::BurnFromExternalContext { span: get_op_span(op), }); } SWW(..) | SRW(..) | SRWQ(..) | SWWQ(..) => { handler.emit_err(CompileError::ContractStorageFromExternalContext { span: get_op_span(op), }); } _ => (), } } Ok(()) }) } /// Checks if an opcode is one that cannot be executed from within a predicate. /// If so, throw an error. /// /// All contract opcodes are not allowed in predicates. Except for RVRT that can /// be used to abort the predicate. 
One example of disallowed code is as follows: /// ```ignore /// pub fn burn(sub_id: SubId, amount: u64) { /// asm(r1: amount) { /// burn r1; /// } /// } /// ``` /// /// Jumping backwards is not allowed in predicates so JMP and JNE are not allowed and /// the function verifies that the immediate of JI, JNEI, JNZI is greater than the opcode offset. /// /// See: https://fuellabs.github.io/fuel-specs/master/vm/index.html?highlight=predicate#predicate-verification pub(crate) fn check_predicate_opcodes( handler: &Handler, ops: &[AllocatedOp], ) -> Result<(), ErrorEmitted> { use AllocatedInstruction::*; // Abort compilation because the finalized asm contains opcodes invalid to a predicate. // Preemptively avoids the creation of predicates with opcodes not allowed at runtime. handler.scope(|handler| { for op in ops.iter() { let invalid_opcode = |name_str: &str| { handler.emit_err(CompileError::InvalidOpcodeFromPredicate { opcode: name_str.to_string(), span: get_op_span(op), }); }; match op.opcode.clone() { BAL(..) => invalid_opcode("BAL"), BHEI(..) => invalid_opcode("BHEI"), BHSH(..) => invalid_opcode("BHSH"), BURN(..) => invalid_opcode("BURN"), CALL(..) => invalid_opcode("CALL"), CB(..) => invalid_opcode("CB"), CCP(..) => invalid_opcode("CCP"), CROO(..) => invalid_opcode("CROO"), CSIZ(..) => invalid_opcode("CSIZ"), GM(_, imm) if (1..=2).contains(&imm.value()) => { handler.emit_err(CompileError::GMFromExternalContext { span: get_op_span(op), }); } LOG(..) => invalid_opcode("LOG"), LOGD(..) => invalid_opcode("LOGD"), MINT(..) => invalid_opcode("MINT"), RETD(..) => invalid_opcode("RETD"), SMO(..) => invalid_opcode("SMO"), SRW(..) => invalid_opcode("SRW"), SRWQ(..) => invalid_opcode("SRWQ"), SWW(..) => invalid_opcode("SWW"), SWWQ(..) => invalid_opcode("SWWQ"), TIME(..) => invalid_opcode("TIME"), TR(..) => invalid_opcode("TR"), TRO(..) 
=> invalid_opcode("TRO"), _ => (), }; } Ok(()) }) } fn get_op_span(op: &AllocatedOp) -> Span { let default_span = sway_types::span::Span::new("no span found for opcode".into(), 0, 1, None).unwrap(); op.owning_span .clone() .unwrap_or_else(|| default_span.clone()) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/mod.rs
sway-core/src/asm_generation/fuel/mod.rs
//! Fuel-VM backend of ASM generation: lowering IR into abstract instruction
//! sets, register allocation, and assembly of the final program.

// Visible throughout the crate.
pub(crate) mod compiler_constants;
pub(crate) mod data_section;
pub(crate) mod register_allocator;

// Visible to the parent `asm_generation` module.
pub(super) mod abstract_instruction_set;
pub(super) mod allocated_abstract_instruction_set;
pub(super) mod checks;
pub(super) mod fuel_asm_builder;
pub(super) mod programs;
pub(super) mod register_sequencer;

// Internal to this module.
mod analyses;
mod functions;
mod globals_section;
mod optimizations;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/analyses.rs
sway-core/src/asm_generation/fuel/analyses.rs
use std::collections::{BTreeSet, HashMap}; use either::Either; use indexmap::IndexSet; use sway_types::FxIndexSet; use crate::asm_lang::{ControlFlowOp, Label, Op, VirtualRegister}; /// Given a list of instructions `ops` of a program, do liveness analysis for the full program. /// /// A virtual registers is live at some point in the program if it has previously been defined by /// an instruction and will be used by an instruction in the future. /// /// The analysis function below assumes that it is possible that a virtual register is assigned /// more than once. That is, it doesn't assume that the intermediate assembly is in SSA form. /// /// Two tables are generated: `live_in` and `live_out`. Each row in the tables corresponds to an /// instruction in the program. /// * A virtual register is in the `live_out` table for a given instruction if it is live on any /// of that node's out-edges /// * A virtual register is in the `live_in` table for a given instruction if it is live on any /// of that node's in-edges /// /// /// Algorithm: /// =============================================================================================== /// for each instruction op: /// live_in(op) = {} /// live_out(op) = {} /// def(op) = list of virtual registers defined by op /// use(op) = list of virtual registers used by op /// /// repeat /// for each instruction op (traversed in reverse topological order of the CFG) /// prev_live_in(op) = live_in(op) /// prev_live_out(op) = live_out(op) /// live_out(op) = live_in(s_1) UNION live_in(s_2) UNION live_in(s_3) UNION ... /// where s_1, s_2, s_3, ... are all the successors of op in the CFG. 
/// live_in(op) = use(op) UNION (live_out(op) - def(op)) /// until prev_live_in(op) = live_in(op) /// AND prev_live_out(op) = live_out(op) /// =============================================================================================== /// /// If `ignore_constant_regs == true` then we only look at registers that have the enum variant /// VirtualRegister::Virtual(_). All other registers (i.e. ones with the /// VirtualRegister::Constant(_) variant) are assumed to be live throughout the full program. /// /// This function finally returns `live_out` because it has all the liveness information needed. /// `live_in` is computed because it is needed to compute `live_out` iteratively. /// pub(crate) fn liveness_analysis( ops: &[Op], ignore_constant_regs: bool, ) -> Vec<BTreeSet<VirtualRegister>> { // Vectors representing maps that will represent the live_in and live_out tables. Each entry // corresponds to an instruction in `ops`. let mut live_in: Vec<FxIndexSet<VirtualRegister>> = vec![IndexSet::default(); ops.len()]; let mut live_out: Vec<BTreeSet<VirtualRegister>> = vec![BTreeSet::default(); ops.len()]; let mut label_to_index: HashMap<Label, usize> = HashMap::new(); // Keep track of a map between jump labels and op indices. Useful to compute op successors. for (idx, op) in ops.iter().enumerate() { if let Either::Right(ControlFlowOp::Label(op_label)) = op.opcode { label_to_index.insert(op_label, idx); } } let mut modified = true; while modified { modified = false; // Iterate in reverse topological order of the CFG (which is basically the same as the // reverse order of `ops`. This makes the outer `while` loop converge faster. 
for (ix, op) in ops.iter().rev().enumerate() { let mut local_modified = false; let rev_ix = ops.len() - ix - 1; // Get use and def vectors without any of the Constant registers let mut op_use = op.use_registers(); let mut op_def = op.def_registers(); if ignore_constant_regs { op_use.retain(|&reg| reg.is_virtual()); op_def.retain(|&reg| reg.is_virtual()); } // Compute live_out(op) = live_in(s_1) UNION live_in(s_2) UNION ..., where s1, s_2, ... // are successors of op for s in &op.successors(rev_ix, ops, &label_to_index) { for l in live_in[*s].iter() { local_modified |= live_out[rev_ix].insert(l.clone()); } } // Compute live_in(op) = use(op) UNION (live_out(op) - def(op)) // Add use(op) for u in op_use { local_modified |= live_in[rev_ix].insert(u.clone()); } // Add live_out(op) - def(op) for l in live_out[rev_ix].iter() { if !op_def.contains(&l) { local_modified |= live_in[rev_ix].insert(l.clone()); } } // Did anything change in this iteration? modified |= local_modified; } } live_out }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/fuel_asm_builder.rs
sway-core/src/asm_generation/fuel/fuel_asm_builder.rs
use super::{ data_section::EntryName, globals_section::GlobalsSection, programs::{AbstractEntry, AbstractProgram}, }; use crate::{ asm_generation::{ asm_builder::AsmBuilder, from_ir::{StateAccessType, Storage}, fuel::{ abstract_instruction_set::AbstractInstructionSet, compiler_constants, data_section::{DataId, DataSection, Entry}, register_sequencer::RegisterSequencer, }, FinalizedAsm, ProgramKind, }, asm_lang::{ virtual_register::*, JumpType, Label, Op, VirtualImmediate06, VirtualImmediate12, VirtualImmediate18, VirtualOp, WideCmp, WideOperations, }, decl_engine::DeclRefFunction, metadata::MetadataManager, BuildConfig, }; use sway_error::{ error::CompileError, handler::{ErrorEmitted, Handler}, warning::CompileWarning, warning::Warning, }; use sway_ir::*; use sway_types::{span::Span, Spanned}; use either::Either; use std::collections::HashMap; pub struct FuelAsmBuilder<'ir, 'eng> { pub(super) program_kind: ProgramKind, // Data section is used by the rest of code gen to layout const memory. pub(super) data_section: DataSection, // Globals will be allocated at SSP uninitialized (they will not be zeroed) pub(super) globals_section: GlobalsSection, // Maps configurable name to data id, only used by encoding v0 pub(super) configurable_v0_data_id: HashMap<String, DataId>, // Register sequencer dishes out new registers and labels. pub(super) reg_seqr: RegisterSequencer, // Label maps are from IR functions or blocks to label name. Functions have a start and end // label. pub(super) func_label_map: HashMap<Function, (Label, Label)>, pub(super) block_label_map: HashMap<Block, Label>, // Reg map is tracking IR values to VM values. Ptr map is tracking IR pointers to local // storage types. pub(super) reg_map: HashMap<Value, VirtualRegister>, pub(super) ptr_map: HashMap<LocalVar, Storage>, // PHIs need a register to which predecessor blocks will copy the value to. // That VirtualRegister is then copied to another one in the block, mapped by reg_map. 
pub(super) phi_reg_map: HashMap<Value, VirtualRegister>, // The currently compiled function has an end label which is at the end of the function body // but before the call cleanup, and a copy of the $retv for when the return value is a reference // type and must be copied in memory. Unless we have nested function declarations this vector // will usually have 0 or 1 entry. pub(super) return_ctxs: Vec<(Label, VirtualRegister)>, // Stack size and base register for locals and num_extra_args in any call in the function. pub(super) locals_ctxs: Vec<(u64, VirtualRegister, u64)>, // IR context we're compiling. pub(super) context: &'ir Context<'eng>, // Metadata manager for converting metadata to Spans, etc. pub(super) md_mgr: MetadataManager, // Final resulting VM bytecode ops; entry functions with their function and label, and regular // non-entry functions. pub(super) entries: Vec<(Function, Label, Vec<Op>, Option<DeclRefFunction>)>, pub(super) non_entries: Vec<Vec<Op>>, // In progress VM bytecode ops. pub(super) cur_bytecode: Vec<Op>, // Instructions that will be appended after globals allocation, but before the entry function is called. pub(super) before_entries: Vec<Op>, } impl AsmBuilder for FuelAsmBuilder<'_, '_> { fn func_to_labels(&mut self, func: &Function) -> (Label, Label) { self.func_to_labels(func) } fn compile_configurable(&mut self, config: &ConfigContent) { match config { ConfigContent::V0 { name, constant, .. } => { let entry = Entry::from_constant( self.context, constant.get_content(self.context), EntryName::Configurable(name.clone()), None, ); let dataid = self.data_section.insert_data_value(entry); self.configurable_v0_data_id.insert(name.clone(), dataid); } ConfigContent::V1 { name, ty, encoded_bytes, decode_fn, .. 
} => { let size_in_bytes = ty.size(self.context).in_bytes(); self.globals_section.insert(name, size_in_bytes); let global = self.globals_section.get_by_name(name).unwrap(); let (decode_fn_label, _) = self.func_label_map.get(&decode_fn.get()).unwrap(); let dataid = self.data_section.insert_data_value(Entry::new_byte_array( encoded_bytes.clone(), EntryName::Configurable(name.clone()), None, )); self.before_entries.push(Op { opcode: Either::Left(VirtualOp::AddrDataId( VirtualRegister::Constant(ConstantRegister::FuncArg0), dataid, )), comment: format!("get pointer to configurable {name} default value"), owning_span: None, }); self.before_entries.push(Op { opcode: Either::Left(VirtualOp::ADDI( VirtualRegister::Constant(ConstantRegister::FuncArg1), VirtualRegister::Constant(ConstantRegister::Zero), VirtualImmediate12::new(encoded_bytes.len() as u64), )), comment: format!("get length of configurable {name} default value"), owning_span: None, }); self.before_entries.push(Op { opcode: Either::Left(VirtualOp::ADDI( VirtualRegister::Constant(ConstantRegister::FuncArg2), VirtualRegister::Constant(ConstantRegister::StackStartPointer), VirtualImmediate12::new(global.offset_in_bytes), )), comment: format!("get pointer to configurable {name} stack address"), owning_span: None, }); // call decode self.before_entries.push(Op { opcode: Either::Right(crate::asm_lang::ControlFlowOp::Jump { to: *decode_fn_label, type_: JumpType::Call, }), comment: format!("decode configurable {name}"), owning_span: None, }); } } } fn compile_function( &mut self, handler: &Handler, function: Function, ) -> Result<(), ErrorEmitted> { self.compile_function(handler, function) } fn finalize( self, handler: &Handler, build_config: Option<&BuildConfig>, fallback_fn: Option<Label>, ) -> Result<FinalizedAsm, ErrorEmitted> { let FuelAsmBuilder { program_kind, data_section, globals_section, reg_seqr, context, entries, non_entries, before_entries: before_entry, .. 
} = self; let opt_level = build_config .map(|cfg| cfg.optimization_level) .unwrap_or_default(); let entries = entries .clone() .into_iter() .map(|(f, l, ops, test_decl_ref)| (f, l, AbstractInstructionSet { ops }, test_decl_ref)) .collect::<Vec<_>>(); let non_entries = non_entries .clone() .into_iter() .map(|ops| AbstractInstructionSet { ops }) .collect::<Vec<_>>(); let entries = entries .into_iter() .map(|(func, label, ops, test_decl_ref)| { let selector = func.get_selector(context); let name = func.get_name(context).to_string(); AbstractEntry { test_decl_ref, selector, label, ops, name, } }) .collect(); let before_entry = AbstractInstructionSet { ops: before_entry }; let virtual_abstract_program = AbstractProgram::new( program_kind, data_section, globals_section, before_entry, entries, non_entries, reg_seqr, context.experimental, ); // Compiled dependencies will not have any content and we // do not want to display their empty ASM structures. // If printing ASM is requested, we want to emit the // actual ASMs generated for the whole program. 
let program_has_content = !virtual_abstract_program.is_empty(); if build_config .map(|cfg| cfg.print_asm.virtual_abstract && program_has_content) .unwrap_or(false) { println!(";; ASM: Virtual abstract program"); println!("{virtual_abstract_program}\n"); } let allocated_program = virtual_abstract_program .into_allocated_program(fallback_fn, opt_level) .map_err(|e| handler.emit_err(e))?; if build_config .map(|cfg| cfg.print_asm.allocated_abstract && program_has_content) .unwrap_or(false) { println!(";; ASM: Allocated abstract program"); println!("{allocated_program}"); } let final_program = allocated_program .into_final_program() .map_err(|e| handler.emit_err(e))?; if build_config .map(|cfg| cfg.print_asm.r#final && program_has_content) .unwrap_or(false) { println!(";; ASM: Final program"); println!("{final_program}"); } Ok(final_program.finalize()) } } impl<'ir, 'eng> FuelAsmBuilder<'ir, 'eng> { pub fn new( program_kind: ProgramKind, data_section: DataSection, reg_seqr: RegisterSequencer, context: &'ir Context<'eng>, ) -> Self { FuelAsmBuilder { program_kind, data_section, globals_section: GlobalsSection::default(), configurable_v0_data_id: HashMap::default(), reg_seqr, func_label_map: HashMap::new(), block_label_map: HashMap::new(), reg_map: HashMap::new(), ptr_map: HashMap::new(), phi_reg_map: HashMap::new(), return_ctxs: Vec::new(), locals_ctxs: Vec::new(), context, md_mgr: MetadataManager::default(), entries: Vec::new(), non_entries: Vec::new(), cur_bytecode: Vec::new(), before_entries: vec![], } } pub(super) fn compile_block( &mut self, handler: &Handler, block: &Block, func_is_entry: bool, ) -> Result<(), ErrorEmitted> { if block .get_function(self.context) .get_entry_block(self.context) != *block { // If the block has an arg, copy value from its phi_reg_map vreg to a new one. 
for arg in block.arg_iter(self.context) { let phi_reg = self.phi_reg_map.entry(*arg).or_insert(self.reg_seqr.next()); // Associate a new virtual register for this arg and copy phi_reg to it. let arg_reg = self.reg_seqr.next(); self.reg_map.insert(*arg, arg_reg.clone()); self.cur_bytecode.push(Op::register_move( arg_reg.clone(), phi_reg.clone(), "move parameter from branch to block argument", None, )); } } let module = block.get_function(self.context).get_module(self.context); for instr_val in block.instruction_iter(self.context) { self.compile_instruction(handler, &instr_val, func_is_entry, module)?; } Ok(()) } pub(super) fn compile_instruction( &mut self, handler: &Handler, instr_val: &Value, func_is_entry: bool, module: Module, ) -> Result<(), ErrorEmitted> { let Some(instruction) = instr_val.get_instruction(self.context) else { return Err(handler.emit_err(CompileError::Internal( "Value is not an instruction.", self.md_mgr .val_to_span(self.context, *instr_val) .unwrap_or_else(Span::dummy), ))); }; // The only instruction whose compilation returns a Result itself is AsmBlock, which // we special-case here. Ideally, the ASM block verification would happen much sooner, // perhaps during parsing. https://github.com/FuelLabs/sway/issues/801 if let InstOp::AsmBlock(asm, args) = &instruction.op { self.compile_asm_block(handler, instr_val, asm, args) } else { // These matches all return `Result<(), CompileError>`. match &instruction.op { InstOp::AsmBlock(..) 
=> unreachable!("Handled immediately above."), InstOp::BitCast(val, ty) => self.compile_bitcast(instr_val, val, ty), InstOp::UnaryOp { op, arg } => self.compile_unary_op(instr_val, op, arg), InstOp::BinaryOp { op, arg1, arg2 } => { self.compile_binary_op(instr_val, op, arg1, arg2) } InstOp::Branch(to_block) => self.compile_branch(to_block), InstOp::Call(func, args) => self.compile_call(instr_val, func, args), InstOp::CastPtr(val, _ty) => self.compile_no_op_move(instr_val, val), InstOp::Cmp(pred, lhs_value, rhs_value) => { self.compile_cmp(instr_val, pred, lhs_value, rhs_value) } InstOp::ConditionalBranch { cond_value, true_block, false_block, } => self.compile_conditional_branch(cond_value, true_block, false_block), InstOp::ContractCall { params, coins, asset_id, gas, .. } => self.compile_contract_call(instr_val, params, coins, asset_id, gas), InstOp::FuelVm(fuel_vm_instr) => match fuel_vm_instr { FuelVmInstruction::Gtf { index, tx_field_id } => { self.compile_gtf(instr_val, index, *tx_field_id) } FuelVmInstruction::Log { log_val, log_ty, log_id, log_data, } => self.compile_log(instr_val, log_val, log_ty, log_id, log_data), FuelVmInstruction::ReadRegister(reg) => { self.compile_read_register(instr_val, reg); Ok(()) } FuelVmInstruction::Revert(revert_val) => { self.compile_revert(instr_val, revert_val) } FuelVmInstruction::Smo { recipient, message, message_size, coins, } => self.compile_smo(instr_val, recipient, message, message_size, coins), FuelVmInstruction::StateClear { key, number_of_slots, } => self.compile_state_clear(instr_val, key, number_of_slots), FuelVmInstruction::StateLoadQuadWord { load_val, key, number_of_slots, } => self.compile_state_access_quad_word( instr_val, load_val, key, number_of_slots, StateAccessType::Read, ), FuelVmInstruction::StateLoadWord(key) => { self.compile_state_load_word(instr_val, key) } FuelVmInstruction::StateStoreQuadWord { stored_val, key, number_of_slots, } => self.compile_state_access_quad_word( instr_val, stored_val, key, 
number_of_slots, StateAccessType::Write, ), FuelVmInstruction::StateStoreWord { stored_val, key } => { self.compile_state_store_word(instr_val, stored_val, key) } // Wide operations FuelVmInstruction::WideUnaryOp { op, result, arg } => { self.compile_wide_unary_op(instr_val, op, arg, result) } FuelVmInstruction::WideBinaryOp { op, result, arg1, arg2, } => self.compile_wide_binary_op(instr_val, op, arg1, arg2, result), FuelVmInstruction::WideCmpOp { op, arg1, arg2 } => { self.compile_wide_cmp_op(instr_val, op, arg1, arg2) } FuelVmInstruction::WideModularOp { op, result, arg1, arg2, arg3, } => self.compile_wide_modular_op(instr_val, op, result, arg1, arg2, arg3), FuelVmInstruction::JmpMem => self.compile_jmp_mem(instr_val), FuelVmInstruction::Retd { ptr, len } => self.compile_retd(instr_val, ptr, len), }, InstOp::GetElemPtr { base, elem_ptr_ty, indices, } => self.compile_get_elem_ptr(instr_val, base, elem_ptr_ty, indices), InstOp::GetLocal(local_var) => self.compile_get_local(instr_val, local_var), InstOp::GetGlobal(global_var) => { self.compile_get_global(instr_val, global_var, module) } InstOp::GetConfig(_, name) => self.compile_get_config(instr_val, name), InstOp::GetStorageKey(storage_key) => { self.compile_get_storage_key(instr_val, storage_key, module) } InstOp::IntToPtr(val, _) => self.compile_no_op_move(instr_val, val), InstOp::Load(src_val) => self.compile_load(instr_val, src_val), InstOp::Alloc { ty, count } => self.compile_alloc(instr_val, ty, count), InstOp::MemCopyBytes { dst_val_ptr, src_val_ptr, byte_len, } => self.compile_mem_copy_bytes(instr_val, dst_val_ptr, src_val_ptr, *byte_len), InstOp::MemCopyVal { dst_val_ptr, src_val_ptr, } => self.compile_mem_copy_val(instr_val, dst_val_ptr, src_val_ptr), InstOp::MemClearVal { dst_val_ptr } => { self.compile_mem_clear_val(instr_val, dst_val_ptr) } InstOp::Nop => Ok(()), InstOp::PtrToInt(ptr_val, _int_ty) => self.compile_no_op_move(instr_val, ptr_val), InstOp::Ret(ret_val, ty) => { if func_is_entry { 
self.compile_ret_from_entry(instr_val, ret_val, ty) } else { self.compile_ret_from_call(instr_val, ret_val) } } InstOp::Store { dst_val_ptr, stored_val, } => self.compile_store(instr_val, dst_val_ptr, stored_val), } .map_err(|e| handler.emit_err(e)) } } fn compile_asm_block( &mut self, handler: &Handler, instr_val: &Value, asm: &AsmBlock, asm_args: &[AsmArg], ) -> Result<(), ErrorEmitted> { let mut inline_reg_map = HashMap::new(); let mut inline_ops = Vec::new(); for AsmArg { name, initializer } in asm_args { if ConstantRegister::parse_register_name(name.as_str()).is_some() { handler.emit_warn(CompileWarning { span: name.span().clone(), warning_content: Warning::ShadowingReservedRegister { reg_name: name.clone(), }, }); } let arg_reg = match initializer { Some(init_val) => { let init_val_reg = match self.value_to_register(init_val) { Ok(ivr) => ivr, Err(e) => { return Err(handler.emit_err(e)); } }; match init_val_reg { VirtualRegister::Virtual(_) => init_val_reg, VirtualRegister::Constant(_) => { let const_copy = self.reg_seqr.next(); inline_ops.push(Op { opcode: Either::Left(VirtualOp::MOVE( const_copy.clone(), init_val_reg, )), comment: "copy ASM block argument's constant initial value to register" .into(), owning_span: self.md_mgr.val_to_span(self.context, *instr_val), }); const_copy } } } None => self.reg_seqr.next(), }; inline_reg_map.insert(name.as_str(), arg_reg); } let realize_register = |reg_name: &str| { inline_reg_map.get(reg_name).cloned().or_else(|| { ConstantRegister::parse_register_name(reg_name).map(VirtualRegister::Constant) }) }; // For each opcode in the asm expression, attempt to parse it into an opcode and // replace references to the above registers with the newly allocated ones. 
let asm_block = asm; for op in &asm_block.body { let replaced_registers = op .args .iter() .map(|reg_name| -> Result<_, CompileError> { realize_register(reg_name.as_str()).ok_or_else(|| { CompileError::UnknownRegister { span: reg_name.span(), initialized_registers: inline_reg_map .keys() .copied() .collect::<Vec<_>>() .join("\n"), } }) }) .filter_map(|res| match res { Err(e) => { handler.emit_err(e); None } Ok(o) => Some(o), }) .collect::<Vec<VirtualRegister>>(); // Parse the actual op and registers. let op_span = self .md_mgr .md_to_span(self.context, op.metadata) .unwrap_or_else(Span::dummy); let opcode = Op::parse_opcode( handler, &op.op_name, &replaced_registers, &op.immediate, op_span.clone(), )?; inline_ops.push(Op { opcode: either::Either::Left(opcode), comment: op_span.as_str().into(), owning_span: Some(op_span), }); } // ASM block always returns a value. The return value is either the one contained in // the return register specified at the end of the ASM block, or it is unit, `()`, in // the case of an ASM block without the return register specified. let (ret_reg, comment) = if let Some(ret_reg_name) = &asm_block.return_name { // If the return register is specified, lookup it by name. let ret_reg = match realize_register(ret_reg_name.as_str()) { Some(reg) => reg, None => { return Err(handler.emit_err(CompileError::UnknownRegister { initialized_registers: inline_reg_map .keys() .map(|name| name.to_string()) .collect::<Vec<_>>() .join("\n"), span: ret_reg_name.span(), })); } }; ( ret_reg, format!("return value from ASM block with return register {ret_reg_name}"), ) } else { // If the return register is not specified, the return value is unit, `()`, and we // move constant register $zero to the final instruction register. 
if !asm_block.return_type.is_unit(self.context) { return Err(handler.emit_err(CompileError::InternalOwned( format!("Return type of an ASM block without return register must be unit, but it was {}.", asm_block.return_type.as_string(self.context)), self.md_mgr .val_to_span(self.context, *instr_val) .unwrap_or_else(Span::dummy), ))); } ( VirtualRegister::Constant(ConstantRegister::Zero), "return unit value from ASM block without return register".into(), ) }; // Move the return register to the instruction register. let instr_reg = self.reg_seqr.next(); inline_ops.push(Op { opcode: Either::Left(VirtualOp::MOVE(instr_reg.clone(), ret_reg)), comment, owning_span: self.md_mgr.val_to_span(self.context, *instr_val), }); self.reg_map.insert(*instr_val, instr_reg); self.cur_bytecode.append(&mut inline_ops); Ok(()) } fn compile_bitcast( &mut self, instr_val: &Value, bitcast_val: &Value, to_type: &Type, ) -> Result<(), CompileError> { let val_reg = self.value_to_register(bitcast_val)?; let reg = if to_type.is_bool(self.context) { // We treat only one as `true`, and not every non-zero value. // So, every non-zero value must be converted to one. let res_reg = self.reg_seqr.next(); self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::EQ( res_reg.clone(), val_reg, VirtualRegister::Constant(ConstantRegister::Zero), )), comment: "[bitcast to bool]: convert value to inverted boolean".into(), owning_span: self.md_mgr.val_to_span(self.context, *instr_val), }); self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::XORI( res_reg.clone(), res_reg.clone(), VirtualImmediate12::new(1), )), comment: "[bitcast to bool]: invert inverted boolean".into(), owning_span: self.md_mgr.val_to_span(self.context, *instr_val), }); res_reg } else if to_type.is_unit(self.context) { // Unit is represented as zero. VirtualRegister::Constant(ConstantRegister::Zero) } else { // For all other values, bitcast is a no-op. 
val_reg }; self.reg_map.insert(*instr_val, reg); Ok(()) } fn compile_unary_op( &mut self, instr_val: &Value, op: &UnaryOpKind, arg: &Value, ) -> Result<(), CompileError> { let val_reg = self.value_to_register(arg)?; let res_reg = self.reg_seqr.next(); let opcode = match op { UnaryOpKind::Not => Either::Left(VirtualOp::NOT(res_reg.clone(), val_reg)), }; self.cur_bytecode.push(Op { opcode, comment: String::new(), owning_span: self.md_mgr.val_to_span(self.context, *instr_val), }); self.reg_map.insert(*instr_val, res_reg); Ok(()) } fn compile_wide_unary_op( &mut self, instr_val: &Value, op: &UnaryOpKind, arg: &Value, result: &Value, ) -> Result<(), CompileError> { let result_reg = self.value_to_register(result)?; let val1_reg = self.value_to_register(arg)?; let opcode = match op { UnaryOpKind::Not => VirtualOp::WQOP( result_reg, val1_reg, VirtualRegister::Constant(ConstantRegister::Zero), VirtualImmediate06::wide_op(crate::asm_lang::WideOperations::Not, false), ), }; self.cur_bytecode.push(Op { opcode: Either::Left(opcode), comment: String::new(), owning_span: self.md_mgr.val_to_span(self.context, *instr_val), }); Ok(()) } fn compile_wide_binary_op( &mut self, instr_val: &Value, op: &BinaryOpKind, arg1: &Value, arg2: &Value, result: &Value, ) -> Result<(), CompileError> { let result_reg = self.value_to_register(result)?; let val1_reg = self.value_to_register(arg1)?; let val2_reg = self.value_to_register(arg2)?; let opcode = match op { BinaryOpKind::Add => VirtualOp::WQOP( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_op(WideOperations::Add, true), ), BinaryOpKind::Sub => VirtualOp::WQOP( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_op(WideOperations::Sub, true), ), BinaryOpKind::And => VirtualOp::WQOP( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_op(WideOperations::And, true), ), BinaryOpKind::Or => VirtualOp::WQOP( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_op(WideOperations::Or, true), ), BinaryOpKind::Xor => 
VirtualOp::WQOP( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_op(WideOperations::Xor, true), ), BinaryOpKind::Lsh => VirtualOp::WQOP( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_op(WideOperations::Lsh, false), ), BinaryOpKind::Rsh => VirtualOp::WQOP( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_op(WideOperations::Rsh, false), ), BinaryOpKind::Mul => VirtualOp::WQML( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_mul(true, true), ), BinaryOpKind::Div => VirtualOp::WQDV( result_reg, val1_reg, val2_reg, VirtualImmediate06::wide_div(true), ), _ => todo!(), }; self.cur_bytecode.push(Op {
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/globals_section.rs
sway-core/src/asm_generation/fuel/globals_section.rs
pub struct GlobalContent { pub name: String, pub size_in_bytes: u64, pub offset_in_bytes: u64, } #[derive(Default)] pub struct GlobalsSection { entries: Vec<GlobalContent>, current_offset_in_bytes: u64, } impl GlobalsSection { pub fn insert(&mut self, name: &str, size_in_bytes: u64) { let g = GlobalContent { name: name.to_string(), size_in_bytes, offset_in_bytes: self.current_offset_in_bytes, }; self.entries.push(g); self.current_offset_in_bytes += size_in_bytes; } pub fn len_in_bytes(&self) -> u64 { self.entries.iter().map(|x| x.size_in_bytes).sum() } pub fn get_by_name(&self, name: &str) -> Option<&GlobalContent> { self.entries.iter().find(|x| x.name == name) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/register_sequencer.rs
sway-core/src/asm_generation/fuel/register_sequencer.rs
use crate::asm_lang::{Label, VirtualRegister}; /// The [RegisterSequencer] is basically an iterator over integers -- it distributes unique ids in /// the form of integers while ASM is being generated to ensure a monotonically increasing unique /// register Id for every virtual register that is used. #[derive(Clone, Copy, Default)] pub struct RegisterSequencer { next_register: usize, next_jump_label: usize, } impl RegisterSequencer { pub(crate) fn new() -> Self { Default::default() } /// Choosing to not use the iterator trait, because this iterator goes on forever and thusly /// does not need to return an `Option<Item>`. pub(crate) fn next(&mut self) -> VirtualRegister { let next_val = self.next_register; self.next_register += 1; VirtualRegister::Virtual(next_val.to_string()) } pub(crate) fn get_label(&mut self) -> Label { let next_val = self.next_jump_label; self.next_jump_label += 1; Label(next_val) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/compiler_constants.rs
sway-core/src/asm_generation/fuel/compiler_constants.rs
/// The total number of registers available and the number of registers available for the compiler /// to use. Registers reserved by the compiler are contained within these. const NUM_TOTAL_REGISTERS: u8 = 64; const NUM_FREE_REGISTERS: u8 = 48; /// This is the number of registers reserved by the compiler. Adjust this number if a new /// reservation must be made. /// So far, the compiler-reserved registers are: /// 1. DATA_SECTION_BEGIN - the offset to the read only data section. /// 2. RETURN_ADDRESS - where a function must return to. /// 3. RETURN_VALUE - the value returned by a _function_ call. /// 4. SCRATCH - used for certain operations which need a register temporarily, such as JMP. /// 5. LOCALS_BASE - base register for stack locals. /// 6. ARGS - for passing arguments to function calls. const NUM_COMPILER_RESERVED_REGISTERS: u8 = 5 + NUM_ARG_REGISTERS; pub(crate) const DATA_SECTION_REGISTER: u8 = NUM_TOTAL_REGISTERS - 1; pub(crate) const RETURN_ADDRESS_REGISTER: u8 = NUM_TOTAL_REGISTERS - 2; pub(crate) const RETURN_VALUE_REGISTER: u8 = NUM_TOTAL_REGISTERS - 3; pub(crate) const SCRATCH_REGISTER: u8 = NUM_TOTAL_REGISTERS - 4; pub(crate) const LOCALS_BASE: u8 = NUM_TOTAL_REGISTERS - 5; pub(crate) const NUM_ARG_REGISTERS: u8 = 6; pub(crate) const ARG_REG0: u8 = NUM_TOTAL_REGISTERS - 6; pub(crate) const ARG_REG1: u8 = NUM_TOTAL_REGISTERS - 7; pub(crate) const ARG_REG2: u8 = NUM_TOTAL_REGISTERS - 8; pub(crate) const ARG_REG3: u8 = NUM_TOTAL_REGISTERS - 9; pub(crate) const ARG_REG4: u8 = NUM_TOTAL_REGISTERS - 10; pub(crate) const ARG_REG5: u8 = NUM_TOTAL_REGISTERS - 11; pub(crate) const NUM_ALLOCATABLE_REGISTERS: u8 = NUM_FREE_REGISTERS - NUM_COMPILER_RESERVED_REGISTERS; pub(crate) const TWENTY_FOUR_BITS: u64 = 0b1111_1111_1111_1111_1111_1111; pub(crate) const EIGHTEEN_BITS: u64 = 0b11_1111_1111_1111_1111; pub(crate) const TWELVE_BITS: u64 = 0b1111_1111_1111; pub(crate) const SIX_BITS: u64 = 0b11_1111; /// Some arbitrary values used for error codes. 
pub(crate) const MISMATCHED_SELECTOR_REVERT_CODE: u32 = 123;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/programs.rs
sway-core/src/asm_generation/fuel/programs.rs
mod r#abstract; mod allocated; mod r#final; pub(crate) use allocated::AllocatedProgram; pub(crate) use r#abstract::{AbstractEntry, AbstractProgram}; pub(crate) use r#final::FinalProgram; pub(crate) type SelectorOpt = Option<[u8; 4]>; pub(crate) type FnName = String; pub(crate) type ImmOffset = u64;
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/programs/allocated.rs
sway-core/src/asm_generation/fuel/programs/allocated.rs
use super::{FinalProgram, FnName, SelectorOpt}; use crate::{ asm_generation::{ fuel::{ allocated_abstract_instruction_set::AllocatedAbstractInstructionSet, data_section::DataSection, }, ProgramKind, }, asm_lang::Label, decl_engine::DeclRefFunction, }; /// An [AllocatedProgram] represents code which has allocated registers but still has abstract /// control flow. pub(crate) struct AllocatedProgram { pub(crate) kind: ProgramKind, pub(crate) data_section: DataSection, pub(crate) prologue: AllocatedAbstractInstructionSet, pub(crate) functions: Vec<AllocatedAbstractInstructionSet>, pub(crate) entries: Vec<(SelectorOpt, Label, FnName, Option<DeclRefFunction>)>, } impl AllocatedProgram { pub(crate) fn into_final_program(mut self) -> Result<FinalProgram, crate::CompileError> { // Concat the prologue and all the functions together. let abstract_ops = AllocatedAbstractInstructionSet { ops: std::iter::once(self.prologue.ops) .chain(self.functions.into_iter().map(|f| f.ops)) .flatten() .collect(), }; let far_jump_sizes = abstract_ops.collect_far_jumps(); let (realized_ops, mut label_offsets) = abstract_ops.realize_labels(&mut self.data_section, &far_jump_sizes)?; let ops = realized_ops.allocated_ops(); // Collect the entry point offsets. let entries = self .entries .into_iter() .map(|(selector, label, name, test_decl_ref)| { let offset = label_offsets .remove(&label) .expect("no offset for entry") .offs; (selector, offset, name, test_decl_ref) }) .collect(); Ok(FinalProgram { kind: self.kind, data_section: self.data_section, ops, entries, }) } } impl std::fmt::Display for AllocatedProgram { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, ";; Program kind: {:?}", self.kind)?; writeln!(f, ";; --- Prologue ---\n{}\n", self.prologue)?; writeln!(f, ";; --- Functions ---")?; for function in &self.functions { writeln!(f, "{function}\n")?; } writeln!(f, ";; --- Data ---")?; writeln!(f, "{}", self.data_section) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/programs/final.rs
sway-core/src/asm_generation/fuel/programs/final.rs
use crate::{ asm_generation::{ fuel::data_section::DataSection, instruction_set::InstructionSet, ProgramKind, }, asm_lang::allocated_ops::AllocatedOp, decl_engine::DeclRefFunction, FinalizedAsm, FinalizedEntry, }; use super::{FnName, ImmOffset, SelectorOpt}; /// A [FinalProgram] represents code which may be serialized to VM bytecode. pub(crate) struct FinalProgram { pub(crate) kind: ProgramKind, pub(crate) data_section: DataSection, pub(crate) ops: Vec<AllocatedOp>, pub(crate) entries: Vec<(SelectorOpt, ImmOffset, FnName, Option<DeclRefFunction>)>, } impl FinalProgram { pub(crate) fn finalize(self) -> FinalizedAsm { let FinalProgram { kind, data_section, ops, entries, } = self; FinalizedAsm { data_section, program_section: InstructionSet::Fuel { ops }, program_kind: kind, entries: entries .into_iter() .map(|(selector, imm, fn_name, test_decl_ref)| FinalizedEntry { imm, fn_name, selector, test_decl_ref, }) .collect(), abi: None, } } } impl std::fmt::Display for FinalProgram { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let FinalProgram { kind, data_section, ops, .. } = self; writeln!(f, ";; Program kind: {kind:?}")?; writeln!( f, ".program:\n{}\n{}", ops.iter() .map(|x| format!("{x}")) .collect::<Vec<_>>() .join("\n"), data_section, ) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/programs/abstract.rs
sway-core/src/asm_generation/fuel/programs/abstract.rs
use super::{AllocatedProgram, FnName, SelectorOpt}; use crate::{ asm_generation::{ fuel::{ abstract_instruction_set::AbstractInstructionSet, allocated_abstract_instruction_set::AllocatedAbstractInstructionSet, compiler_constants, data_section::{DataSection, Entry, EntryName}, globals_section::GlobalsSection, register_sequencer::RegisterSequencer, }, ProgramKind, }, asm_lang::{ allocated_ops::{AllocatedInstruction, AllocatedRegister}, AllocatedAbstractOp, ConstantRegister, ControlFlowOp, JumpType, Label, VirtualImmediate12, VirtualImmediate18, VirtualImmediate24, }, decl_engine::DeclRefFunction, OptLevel, }; use either::Either; use sway_error::error::CompileError; use sway_features::ExperimentalFeatures; /// The entry point of an abstract program. pub(crate) struct AbstractEntry { pub(crate) selector: SelectorOpt, pub(crate) label: Label, pub(crate) ops: AbstractInstructionSet, pub(crate) name: FnName, pub(crate) test_decl_ref: Option<DeclRefFunction>, } /// An [AbstractProgram] represents code generated by the compilation from IR, with virtual registers /// and abstract control flow. /// /// Use `AbstractProgram::to_allocated_program()` to perform register allocation. 
/// pub(crate) struct AbstractProgram { kind: ProgramKind, data_section: DataSection, globals_section: GlobalsSection, before_entries: AbstractInstructionSet, entries: Vec<AbstractEntry>, non_entries: Vec<AbstractInstructionSet>, reg_seqr: RegisterSequencer, experimental: ExperimentalFeatures, } impl AbstractProgram { #[allow(clippy::too_many_arguments)] pub(crate) fn new( kind: ProgramKind, data_section: DataSection, globals_section: GlobalsSection, before_entries: AbstractInstructionSet, entries: Vec<AbstractEntry>, non_entries: Vec<AbstractInstructionSet>, reg_seqr: RegisterSequencer, experimental: ExperimentalFeatures, ) -> Self { AbstractProgram { kind, data_section, globals_section, before_entries, entries, non_entries, reg_seqr, experimental, } } /// True if the [AbstractProgram] does not contain any instructions, or entries, or data in the data section. pub(crate) fn is_empty(&self) -> bool { self.non_entries.is_empty() && self.entries.is_empty() && self.data_section.iter_all_entries().next().is_none() } /// Adds prologue, globals allocation, before entries, contract method switch, and allocates virtual register. pub(crate) fn into_allocated_program( mut self, fallback_fn: Option<crate::asm_lang::Label>, opt_level: OptLevel, ) -> Result<AllocatedProgram, CompileError> { let mut prologue = self.build_prologue(); self.append_globals_allocation(&mut prologue); self.append_before_entries(&mut prologue, opt_level)?; match (self.experimental.new_encoding, self.kind) { (true, ProgramKind::Contract) => { self.append_jump_to_entry(&mut prologue); } (false, ProgramKind::Contract) => { self.append_encoding_v0_contract_abi_switch(&mut prologue, fallback_fn); } _ => {} } // Keep track of the labels (and names) that represent program entry points. let entries = self .entries .iter() .map(|entry| { ( entry.selector, entry.label, entry.name.clone(), entry.test_decl_ref.clone(), ) }) .collect(); // Gather all functions. 
let all_functions = self .entries .into_iter() .map(|entry| entry.ops) .chain(self.non_entries); // Optimize and then verify abstract functions. let abstract_functions = all_functions .map(|instruction_set| instruction_set.optimize(&self.data_section, opt_level)) .map(AbstractInstructionSet::verify) .collect::<Result<Vec<AbstractInstructionSet>, CompileError>>()?; // Allocate the registers for each function. let allocated_functions = abstract_functions .into_iter() .map(|abstract_instruction_set| { let allocated = abstract_instruction_set.allocate_registers()?; Ok(allocated.emit_pusha_popa()) }) .collect::<Result<Vec<AllocatedAbstractInstructionSet>, CompileError>>()?; // Optimize allocated functions. // TODO: Add verification. E.g., verify that the stack use for each function is balanced. let functions = allocated_functions .into_iter() .map(|instruction_set| instruction_set.optimize()) .collect::<Vec<AllocatedAbstractInstructionSet>>(); Ok(AllocatedProgram { kind: self.kind, data_section: self.data_section, prologue, functions, entries, }) } fn append_before_entries( &self, prologue: &mut AllocatedAbstractInstructionSet, opt_level: OptLevel, ) -> Result<(), CompileError> { let before_entries = self .before_entries .clone() .optimize(&self.data_section, opt_level); let before_entries = before_entries.verify()?; let mut before_entries = before_entries.allocate_registers()?; prologue.ops.append(&mut before_entries.ops); Ok(()) } /// Builds the asm preamble, which includes metadata and a jump past the metadata. 
/// Right now, it looks like this: /// /// WORD OP /// 1 MOV $scratch $pc /// - JMPF $zero i10 /// 2 DATA_START (0-32) (in bytes, offset from $is) /// - DATA_START (32-64) /// 3 CONFIGURABLES_OFFSET (0-32) /// - CONFIGURABLES_OFFSET (32-64) /// 4 LW $ds $scratch 1 /// - ADD $ds $ds $scratch /// 5 .program_start: fn build_prologue(&mut self) -> AllocatedAbstractInstructionSet { const _: () = assert!( crate::PRELUDE_CONFIGURABLES_OFFSET_IN_BYTES == 16, "Inconsistency in the assumption of prelude organisation" ); const _: () = assert!( crate::PRELUDE_CONFIGURABLES_SIZE_IN_BYTES == 8, "Inconsistency in the assumption of prelude organisation" ); const _: () = assert!( crate::PRELUDE_SIZE_IN_BYTES == 32, "Inconsistency in the assumption of prelude organisation" ); let label = self.reg_seqr.get_label(); AllocatedAbstractInstructionSet { ops: [ AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::MOVE( AllocatedRegister::Constant(ConstantRegister::Scratch), AllocatedRegister::Constant(ConstantRegister::ProgramCounter), )), comment: String::new(), owning_span: None, }, // word 1.5 AllocatedAbstractOp { opcode: Either::Right(ControlFlowOp::Jump { to: label, type_: JumpType::Unconditional, }), comment: String::new(), owning_span: None, }, // word 2 -- full word u64 placeholder AllocatedAbstractOp { opcode: Either::Right(ControlFlowOp::DataSectionOffsetPlaceholder), comment: "data section offset".into(), owning_span: None, }, // word 3 -- full word u64 placeholder AllocatedAbstractOp { opcode: Either::Right(ControlFlowOp::ConfigurablesOffsetPlaceholder), comment: "configurables offset".into(), owning_span: None, }, AllocatedAbstractOp { opcode: Either::Right(ControlFlowOp::Label(label)), comment: "end of configurables offset".into(), owning_span: None, }, // word 4 -- load the data offset into $ds AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::LW( AllocatedRegister::Constant(ConstantRegister::DataSectionStart), 
AllocatedRegister::Constant(ConstantRegister::Scratch), VirtualImmediate12::new(1), )), comment: "".into(), owning_span: None, }, // word 4.5 -- add $ds $ds $is AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::ADD( AllocatedRegister::Constant(ConstantRegister::DataSectionStart), AllocatedRegister::Constant(ConstantRegister::DataSectionStart), AllocatedRegister::Constant(ConstantRegister::Scratch), )), comment: "".into(), owning_span: None, }, ] .to_vec(), } } // WHen the new encoding is used, jumps to the `__entry` function fn append_jump_to_entry(&mut self, asm: &mut AllocatedAbstractInstructionSet) { let entry = self.entries.iter().find(|x| x.name == "__entry").unwrap(); asm.ops.push(AllocatedAbstractOp { opcode: Either::Right(ControlFlowOp::Jump { to: entry.label, type_: JumpType::Unconditional, }), comment: "jump to ABI function selector".into(), owning_span: None, }); } /// Builds the contract switch statement based on the first argument to a contract call: the /// 'selector'. /// See https://fuellabs.github.io/fuel-specs/master/vm#call-frames which /// describes the first argument to be at word offset 73. fn append_encoding_v0_contract_abi_switch( &mut self, asm: &mut AllocatedAbstractInstructionSet, fallback_fn: Option<crate::asm_lang::Label>, ) { const SELECTOR_WORD_OFFSET: u64 = 73; const INPUT_SELECTOR_REG: AllocatedRegister = AllocatedRegister::Allocated(0); const PROG_SELECTOR_REG: AllocatedRegister = AllocatedRegister::Allocated(1); const CMP_RESULT_REG: AllocatedRegister = AllocatedRegister::Allocated(2); // Build the switch statement for selectors. asm.ops.push(AllocatedAbstractOp { opcode: Either::Right(ControlFlowOp::Comment), comment: "[function selection]: begin contract function selector switch".into(), owning_span: None, }); // Load the selector from the call frame. 
asm.ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::LW( INPUT_SELECTOR_REG, AllocatedRegister::Constant(ConstantRegister::FramePointer), VirtualImmediate12::new(SELECTOR_WORD_OFFSET), )), comment: "[function selection]: load input function selector".into(), owning_span: None, }); // Add a 'case' for each entry with a selector. for entry in &self.entries { let selector = match entry.selector { Some(sel) => sel, // Skip entries that don't have a selector - they're probably tests. None => continue, }; // Put the selector in the data section. let data_label = self.data_section.insert_data_value(Entry::new_word( u32::from_be_bytes(selector) as u64, EntryName::NonConfigurable, None, )); // Load the data into a register for comparison. asm.ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::LoadDataId( PROG_SELECTOR_REG, data_label, )), comment: format!( "[function selection]: load function {} selector for comparison", entry.name ), owning_span: None, }); // Compare with the input selector. asm.ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::EQ( CMP_RESULT_REG, INPUT_SELECTOR_REG, PROG_SELECTOR_REG, )), comment: format!( "[function selection]: compare function {} selector with input selector", entry.name ), owning_span: None, }); // Jump to the function label if the selector was equal. asm.ops.push(AllocatedAbstractOp { // If the comparison result is _not_ equal to 0, then it was indeed equal. 
opcode: Either::Right(ControlFlowOp::Jump { to: entry.label, type_: JumpType::NotZero(CMP_RESULT_REG), }), comment: "[function selection]: jump to selected contract function".into(), owning_span: None, }); } if let Some(fallback_fn) = fallback_fn { asm.ops.push(AllocatedAbstractOp { opcode: Either::Right(ControlFlowOp::Jump { to: fallback_fn, type_: JumpType::Call, }), comment: "[function selection]: call contract fallback function".into(), owning_span: None, }); } asm.ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::MOVI( AllocatedRegister::Constant(ConstantRegister::Scratch), VirtualImmediate18::new(compiler_constants::MISMATCHED_SELECTOR_REVERT_CODE.into()), )), comment: "[function selection]: load revert code for mismatched function selector" .into(), owning_span: None, }); asm.ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::RVRT(AllocatedRegister::Constant( ConstantRegister::Scratch, ))), comment: "[function selection]: revert if no selectors have matched".into(), owning_span: None, }); } fn append_globals_allocation(&self, asm: &mut AllocatedAbstractInstructionSet) { let len_in_bytes = self.globals_section.len_in_bytes(); asm.ops.push(AllocatedAbstractOp { opcode: Either::Left(AllocatedInstruction::CFEI(VirtualImmediate24::new( len_in_bytes, ))), comment: "allocate stack space for globals".into(), owning_span: None, }); } } impl std::fmt::Display for AbstractProgram { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, ";; Program kind: {:?}", self.kind)?; writeln!(f, ";; --- Before Entries ---")?; writeln!(f, "{}\n", self.before_entries)?; writeln!(f, ";; --- Entries ---")?; for entry in &self.entries { writeln!(f, "{}\n", entry.ops)?; } writeln!(f, ";; --- Functions ---")?; for function in &self.non_entries { writeln!(f, "{function}\n")?; } writeln!(f, ";; --- Data ---")?; write!(f, "{}", self.data_section) } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/optimizations/misc.rs
sway-core/src/asm_generation/fuel/optimizations/misc.rs
use super::super::abstract_instruction_set::AbstractInstructionSet; use crate::asm_lang::{JumpType, Op, OrganizationalOp, VirtualOp, VirtualRegister}; use std::collections::HashSet; use either::Either; impl AbstractInstructionSet { /// Removes any jumps to the subsequent line. pub(crate) fn remove_sequential_jumps(mut self) -> AbstractInstructionSet { let dead_jumps: Vec<_> = self .ops .windows(2) .enumerate() .filter_map(|(idx, ops)| match (&ops[0].opcode, &ops[1].opcode) { ( Either::Right(OrganizationalOp::Jump { to: dst_label, type_: JumpType::Unconditional | JumpType::NotZero(_), .. }), Either::Right(OrganizationalOp::Label(label)), ) if dst_label == label => Some(idx), _otherwise => None, }) .collect(); // Replace the dead jumps with NOPs, as it's cheaper. for idx in dead_jumps { self.ops[idx] = Op { opcode: Either::Left(VirtualOp::NOOP), comment: "remove redundant jump operation".into(), owning_span: None, }; } self } pub(crate) fn remove_redundant_moves(mut self) -> AbstractInstructionSet { // This has a lot of room for improvement. // // For now it is just removing MOVEs to registers which are _never_ used. It doesn't // analyse control flow or other redundancies. Some obvious improvements are: // // - Perform a control flow analysis to remove MOVEs to registers which are not used // _after_ the MOVE. // // - Remove the redundant use of temporaries. E.g.: // MOVE t, a MOVE b, a // MOVE b, t => USE b // USE b loop { // Gather all the uses for each register. let uses: HashSet<&VirtualRegister> = self.ops.iter().fold(HashSet::new(), |mut acc, op| { for u in &op.use_registers() { acc.insert(u); } acc }); // Loop again and find MOVEs which have a non-constant destination which is never used. 
let mut dead_moves = Vec::new(); for (idx, op) in self.ops.iter().enumerate() { if let Either::Left(VirtualOp::MOVE( dst_reg @ VirtualRegister::Virtual(_), _src_reg, )) = &op.opcode { if !uses.contains(dst_reg) { dead_moves.push(idx); } } } if dead_moves.is_empty() { break; } // Replace the dead moves with NOPs, as it's cheaper. for idx in dead_moves { self.ops[idx] = Op { opcode: Either::Left(VirtualOp::NOOP), comment: "remove redundant move operation".into(), owning_span: None, }; } } self } pub(crate) fn remove_redundant_ops(mut self) -> AbstractInstructionSet { self.ops.retain(|op| { // It is easier to think in terms of operations we want to remove // than the operations we want to retain ;-) #[allow(clippy::match_like_matches_macro)] // Keep the `match` for adding more ops in the future. let remove = match &op.opcode { Either::Left(VirtualOp::NOOP) => true, Either::Left(VirtualOp::MOVE(a, b)) => a == b, Either::Left(VirtualOp::CFEI(_, imm)) | Either::Left(VirtualOp::CFSI(_, imm)) => { imm.value() == 0 } _ => false, }; !remove }); self } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/optimizations/const_indexed_aggregates.rs
sway-core/src/asm_generation/fuel/optimizations/const_indexed_aggregates.rs
use rustc_hash::FxHashMap; use crate::{ asm_generation::fuel::compiler_constants, asm_lang::{VirtualImmediate12, VirtualOp, VirtualRegister}, }; use super::super::{abstract_instruction_set::AbstractInstructionSet, data_section::DataSection}; impl AbstractInstructionSet { // Aggregates that are const index accessed from a base address // can use the IMM field of LW/SW if the value fits in 12 bits. // Only the LW/SW instructions are modified, and the redundant // computations left untouched, to be later removed by a DCE pass. pub(crate) fn const_indexing_aggregates_function(mut self, data_section: &DataSection) -> Self { // Poor man's SSA (local ... per block). #[derive(PartialEq, Eq, Hash, Clone, Debug)] struct VRegDef { reg: VirtualRegister, ver: u32, } // What does a register contain? #[derive(Debug, PartialEq, Eq)] enum RegContents { Constant(u64), BaseOffset(VRegDef, u64), } // What is the latest version of a vreg definition. let mut latest_version = FxHashMap::<VirtualRegister, u32>::default(); // Track register contents as we progress instructions in a block. let mut reg_contents = FxHashMap::<VirtualRegister, RegContents>::default(); // Record that we saw a new definition of `reg`. fn record_new_def( latest_version: &mut FxHashMap<VirtualRegister, u32>, reg: &VirtualRegister, ) { latest_version .entry(reg.clone()) .and_modify(|ver| *ver += 1) .or_insert(1); } // What's the latest definition we've seen of `reg`? 
fn get_def_version( latest_version: &FxHashMap<VirtualRegister, u32>, reg: &VirtualRegister, ) -> u32 { latest_version.get(reg).cloned().unwrap_or(0) } fn process_add( reg_contents: &mut FxHashMap<VirtualRegister, RegContents>, latest_version: &mut FxHashMap<VirtualRegister, u32>, dest: &VirtualRegister, opd1: &VirtualRegister, c2: u64, ) { match reg_contents.get(opd1) { Some(RegContents::Constant(c1)) if c1.checked_add(c2).is_some() => { reg_contents.insert(dest.clone(), RegContents::Constant(c1 + c2)); record_new_def(latest_version, dest); } Some(RegContents::BaseOffset(base_reg, offset)) if get_def_version(latest_version, &base_reg.reg) == base_reg.ver && offset.checked_add(c2).is_some() => { reg_contents.insert( dest.clone(), RegContents::BaseOffset(base_reg.clone(), offset + c2), ); record_new_def(latest_version, dest); } _ => { let base = VRegDef { reg: opd1.clone(), ver: get_def_version(latest_version, opd1), }; reg_contents.insert(dest.clone(), RegContents::BaseOffset(base, c2)); record_new_def(latest_version, dest); } } } self.ops.retain_mut(|op| { let mut retain = true; let mut clear_state = false; // Uncomment to debug what this optimization is doing // let op_before = op.clone(); match &mut op.opcode { either::Either::Left(op) => { match op { VirtualOp::ADD(dest, opd1, opd2) => { // We don't look for the first operand being a constant and the second // one a base register. Such patterns must be canonicalised prior. 
if let Some(&RegContents::Constant(c2)) = reg_contents.get(opd2) { process_add(&mut reg_contents, &mut latest_version, dest, opd1, c2); } else { reg_contents.remove(dest); record_new_def(&mut latest_version, dest); }; } VirtualOp::ADDI(dest, opd1, opd2) => { let c2 = opd2.value() as u64; process_add(&mut reg_contents, &mut latest_version, dest, opd1, c2); } VirtualOp::MUL(dest, opd1, opd2) => { match (reg_contents.get(opd1), reg_contents.get(opd2)) { ( Some(RegContents::Constant(c1)), Some(RegContents::Constant(c2)), ) => { reg_contents .insert(dest.clone(), RegContents::Constant(c1 * c2)); record_new_def(&mut latest_version, dest); } _ => { reg_contents.remove(dest); record_new_def(&mut latest_version, dest); } } } VirtualOp::LoadDataId(dest, data_id) => { if let Some(c) = data_section.get_data_word(data_id) { reg_contents.insert(dest.clone(), RegContents::Constant(c)); } else { reg_contents.remove(dest); } record_new_def(&mut latest_version, dest); } VirtualOp::MOVI(dest, imm) => { reg_contents .insert(dest.clone(), RegContents::Constant(imm.value() as u64)); record_new_def(&mut latest_version, dest); } VirtualOp::LW(dest, addr_reg, imm) => match reg_contents.get(addr_reg) { Some(RegContents::BaseOffset(base_reg, offset)) if offset % 8 == 0 && get_def_version(&latest_version, &base_reg.reg) == base_reg.ver && ((offset / 8) + imm.value() as u64) < compiler_constants::TWELVE_BITS => { let new_imm = VirtualImmediate12::new((offset / 8) + imm.value() as u64); let new_lw = VirtualOp::LW(dest.clone(), base_reg.reg.clone(), new_imm); // The register defined is no more useful for us. Forget anything from its past. reg_contents.remove(dest); record_new_def(&mut latest_version, dest); // Replace the LW with a new one in-place. 
*op = new_lw; } _ => { reg_contents.remove(dest); record_new_def(&mut latest_version, dest); } }, VirtualOp::SW(addr_reg, src, imm) => match reg_contents.get(addr_reg) { Some(RegContents::BaseOffset(base_reg, offset)) if offset % 8 == 0 && get_def_version(&latest_version, &base_reg.reg) == base_reg.ver && ((offset / 8) + imm.value() as u64) < compiler_constants::TWELVE_BITS => { let new_imm = VirtualImmediate12::new((offset / 8) + imm.value() as u64); let new_sw = VirtualOp::SW(base_reg.reg.clone(), src.clone(), new_imm); // Replace the SW with a new one in-place. *op = new_sw; } _ => (), }, VirtualOp::MOVE(dest, src) => { let ver = get_def_version(&latest_version, src); if let Some(RegContents::BaseOffset(src, 0)) = reg_contents.get(src) { if dest == &src.reg && src.ver == ver { retain = false; } } else { reg_contents.insert( dest.clone(), RegContents::BaseOffset( VRegDef { reg: src.clone(), ver, }, 0, ), ); } } _ => { // For every Op that we don't know about, // forget everything we know about its def registers. for def_reg in op.def_registers() { reg_contents.remove(def_reg); record_new_def(&mut latest_version, def_reg); } } } } either::Either::Right(_) => { clear_state = true; } } // Uncomment to debug what this optimization is doing //let before = op_before.opcode.to_string(); //let after = op.opcode.to_string(); // println!("{}", before); if clear_state { latest_version.clear(); reg_contents.clear(); // println!(" state cleared"); } // Uncomment to debug what this optimization is doing // if before != after { // println!(" optimized to"); // println!(" {}", after); // println!(" using"); // for (k, v) in reg_contents.iter() { // println!(" - {:?} -> {:?}", k, v); // } // } // if !retain { // println!(" removed"); // for (k, v) in reg_contents.iter() { // println!(" - {:?} -> {:?}", k, v); // } // } // if forget_def_registers { // for def_reg in op.def_registers() { // println!(" forget {}", def_reg.to_string()); // } // } retain }); self } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/optimizations/reachability.rs
sway-core/src/asm_generation/fuel/optimizations/reachability.rs
use std::collections::{BTreeSet, HashMap}; use either::Either; use rustc_hash::FxHashSet; use crate::asm_lang::{ControlFlowOp, JumpType, Label}; use super::super::{abstract_instruction_set::AbstractInstructionSet, analyses::liveness_analysis}; impl AbstractInstructionSet { pub(crate) fn dce(mut self) -> AbstractInstructionSet { let liveness = liveness_analysis(&self.ops, false); let ops = &self.ops; let mut cur_live = BTreeSet::default(); let mut dead_indices = FxHashSet::default(); for (rev_ix, op) in ops.iter().rev().enumerate() { let ix = ops.len() - rev_ix - 1; let op_use = op.use_registers(); let mut op_def = op.def_registers(); op_def.append(&mut op.def_const_registers()); if let Either::Right(ControlFlowOp::Jump { type_, .. }) = &op.opcode { if !matches!(type_, JumpType::Call) { // Block boundary. Start afresh. cur_live.clone_from(liveness.get(ix).expect("Incorrect liveness info")); // Add use(op) to cur_live. for u in op_use { cur_live.insert(u.clone()); } continue; } } let dead = op_def.iter().all(|def| !cur_live.contains(def)) && match &op.opcode { Either::Left(op) => !op.has_side_effect(), Either::Right(_) => false, }; // Remove def(op) from cur_live. for def in &op_def { cur_live.remove(def); } if dead { dead_indices.insert(ix); } else { // Add use(op) to cur_live for u in op_use { cur_live.insert(u.clone()); } } } // Actually delete the instructions. let mut new_ops: Vec<_> = std::mem::take(&mut self.ops) .into_iter() .enumerate() .filter_map(|(idx, op)| { if !dead_indices.contains(&idx) { Some(op) } else { None } }) .collect(); std::mem::swap(&mut self.ops, &mut new_ops); self } // Remove unreachable instructions. pub(crate) fn simplify_cfg(mut self) -> AbstractInstructionSet { let ops = &self.ops; if ops.is_empty() { return self; } // Keep track of a map between jump labels and op indices. Useful to compute op successors. 
let mut label_to_index: HashMap<Label, usize> = HashMap::default(); for (idx, op) in ops.iter().enumerate() { if let Either::Right(ControlFlowOp::Label(op_label)) = op.opcode { label_to_index.insert(op_label, idx); } } let mut reachables = vec![false; ops.len()]; let mut worklist = vec![0]; while let Some(op_idx) = worklist.pop() { if reachables[op_idx] { continue; } reachables[op_idx] = true; let op = &ops[op_idx]; for s in &op.successors(op_idx, ops, &label_to_index) { if reachables[*s] { continue; } worklist.push(*s); } } let reachable_ops = self .ops .into_iter() .enumerate() .filter_map(|(idx, op)| if reachables[idx] { Some(op) } else { None }) .collect(); self.ops = reachable_ops; self } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/optimizations/verify.rs
sway-core/src/asm_generation/fuel/optimizations/verify.rs
use std::collections::HashSet; use sway_error::error::CompileError; use sway_types::Span; use crate::{ asm_generation::fuel::abstract_instruction_set::AbstractInstructionSet, asm_lang::VirtualRegister, }; impl AbstractInstructionSet { // At the moment the only verification we do is to make sure used registers are // initialised. Without doing dataflow analysis we still can't guarantee the init is // _before_ the use, but future refactoring to convert abstract ops into SSA and BBs will // make this possible or even make this check redundant. pub(crate) fn verify(self) -> Result<AbstractInstructionSet, CompileError> { macro_rules! add_virt_regs { ($regs: expr, $set: expr) => { let mut regs = $regs; regs.retain(|&reg| matches!(reg, VirtualRegister::Virtual(_))); $set.extend(regs.into_iter()); }; } let mut use_regs = HashSet::new(); let mut def_regs = HashSet::new(); for op in &self.ops { add_virt_regs!(op.use_registers(), use_regs); add_virt_regs!(op.def_registers(), def_regs); } if def_regs.is_superset(&use_regs) { Ok(self) } else { let bad_regs = use_regs .difference(&def_regs) .map(|reg| match reg { VirtualRegister::Virtual(name) => format!("$r{name}"), VirtualRegister::Constant(creg) => creg.to_string(), }) .collect::<Vec<_>>() .join(", "); Err(CompileError::InternalOwned( format!("Program erroneously uses uninitialized virtual registers: {bad_regs}"), Span::dummy(), )) } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/optimizations/mod.rs
sway-core/src/asm_generation/fuel/optimizations/mod.rs
mod const_indexed_aggregates; mod constant_propagate; mod misc; mod reachability; mod verify; use std::cmp::Ordering; use super::abstract_instruction_set::AbstractInstructionSet; use crate::OptLevel; use super::data_section::DataSection; /// Maximum number of optimization rounds to perform in release build. const MAX_OPT_ROUNDS: usize = 10; impl AbstractInstructionSet { pub(crate) fn optimize( mut self, data_section: &DataSection, level: OptLevel, ) -> AbstractInstructionSet { match level { // On debug builds do a single pass through the simple optimizations OptLevel::Opt0 => self .const_indexing_aggregates_function(data_section) .constant_propagate() .dce() .simplify_cfg() .remove_sequential_jumps() .remove_redundant_moves() .remove_redundant_ops(), // On release builds we can do more iterations OptLevel::Opt1 => { for _ in 0..MAX_OPT_ROUNDS { let old = self.clone(); // run two rounds, so that if an optimization depends on another // it will be applied at least once self = self.optimize(data_section, OptLevel::Opt0); self = self.optimize(data_section, OptLevel::Opt0); match self.ops.len().cmp(&old.ops.len()) { // Not able to optimize anything, stop here Ordering::Equal => break, // Never accept worse results Ordering::Greater => return old, // We reduced the number of ops, so continue Ordering::Less => {} } } self } } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/sway-core/src/asm_generation/fuel/optimizations/constant_propagate.rs
sway-core/src/asm_generation/fuel/optimizations/constant_propagate.rs
use std::collections::hash_map::Entry; use either::Either; use rustc_hash::FxHashMap; use crate::asm_lang::{ ConstantRegister, ControlFlowOp, JumpType, Label, Op, VirtualOp, VirtualRegister, }; use super::super::abstract_instruction_set::AbstractInstructionSet; #[derive(Clone, Debug, PartialEq, Eq)] enum KnownRegValue { Const(u64), Eq(VirtualRegister), } impl KnownRegValue { /// If the value can be represented as a register, return it. fn register(&self) -> Option<VirtualRegister> { match self { KnownRegValue::Const(0) => Some(VirtualRegister::Constant(ConstantRegister::Zero)), KnownRegValue::Const(1) => Some(VirtualRegister::Constant(ConstantRegister::One)), KnownRegValue::Eq(v) => Some(v.clone()), _ => None, } } /// If the value can be represented as a constant, return it. fn value(&self) -> Option<u64> { match self { KnownRegValue::Const(v) => Some(*v), KnownRegValue::Eq(VirtualRegister::Constant(ConstantRegister::Zero)) => Some(0), KnownRegValue::Eq(VirtualRegister::Constant(ConstantRegister::One)) => Some(1), KnownRegValue::Eq(_) => None, } } /// Check if the value depends on value of another register. fn depends_on(&self, reg: &VirtualRegister) -> bool { match self { KnownRegValue::Const(_) => false, KnownRegValue::Eq(v) => v == reg, } } } #[derive(Clone, Debug, Default)] struct KnownValues { values: FxHashMap<VirtualRegister, KnownRegValue>, } impl KnownValues { /// Resolve a register to a known value. fn resolve(&self, v: &VirtualRegister) -> Option<KnownRegValue> { match v { VirtualRegister::Constant(ConstantRegister::Zero) => Some(KnownRegValue::Const(0)), VirtualRegister::Constant(ConstantRegister::One) => Some(KnownRegValue::Const(1)), other => self.values.get(other).cloned(), } } /// Clear values that depend on a register having a specific value. fn clear_dependent_on(&mut self, reg: &VirtualRegister) { self.values.retain(|_, v| !v.depends_on(reg)); } /// Insert a known value for a register. 
fn assign(&mut self, dst: VirtualRegister, value: KnownRegValue) { self.clear_dependent_on(&dst); self.values.insert(dst, value); } } /// What knowledge is lost after an op we don't know how to interpret? #[derive(Clone, Debug)] enum ResetKnown { /// Reset all known values Full, /// Reset non-virtual registers in addition to defs NonVirtual, /// Only the `def_registers` and `def_const_registers` are reset Defs, } impl ResetKnown { fn apply(&self, op: &Op, known_values: &mut KnownValues) { match self { ResetKnown::Full => { known_values.values.clear(); } ResetKnown::NonVirtual => { Self::Defs.apply(op, known_values); known_values .values .retain(|k, _| matches!(k, VirtualRegister::Virtual(_))); } ResetKnown::Defs => { for d in op.def_registers() { known_values.clear_dependent_on(d); known_values.values.remove(d); } for d in op.def_const_registers() { known_values.clear_dependent_on(d); known_values.values.remove(d); } } } } } impl AbstractInstructionSet { /// Symbolically interpret code and propagate known register values. pub(crate) fn constant_propagate(mut self) -> AbstractInstructionSet { if self.ops.is_empty() { return self; } // The set of labels that are jump targets, and how many places jump to them. // todo: build proper control flow graph instead let mut jump_target_labels = FxHashMap::<Label, usize>::default(); for op in &self.ops { if let Either::Right(ControlFlowOp::Jump { to, .. }) = &op.opcode { *jump_target_labels.entry(*to).or_default() += 1; } } let mut known_values = KnownValues::default(); for op in &mut self.ops { // Perform constant propagation on the instruction. 
let mut uses_regs: Vec<_> = op.use_registers_mut().into_iter().collect(); for reg in uses_regs.iter_mut() { // We only optimize over virtual registers here, constant registers shouldn't be replaced if !reg.is_virtual() { continue; } if let Some(r) = known_values.resolve(reg).and_then(|r| r.register()) { **reg = r; } } // Some instructions can be further simplified with the known values. if let Either::Right(ControlFlowOp::Jump { to, type_: JumpType::NotZero(reg), }) = &mut op.opcode { if let Some(con) = known_values.resolve(reg).and_then(|r| r.value()) { if con == 0 { let Entry::Occupied(mut count) = jump_target_labels.entry(*to) else { unreachable!("Jump target label not found in jump_target_labels"); }; *count.get_mut() -= 1; if *count.get() == 0 { // Nobody jumps to this label anymore jump_target_labels.remove(to); } op.opcode = Either::Left(VirtualOp::NOOP); } else { op.opcode = Either::Right(ControlFlowOp::Jump { to: *to, type_: JumpType::Unconditional, }); } } } // Some ops are known to produce certain results, interpret them here. let interpreted_op = match &op.opcode { Either::Left(VirtualOp::MOVI(dst, imm)) => { let imm = KnownRegValue::Const(imm.value() as u64); if known_values.resolve(dst) == Some(imm.clone()) { op.opcode = Either::Left(VirtualOp::NOOP); } else { known_values.assign(dst.clone(), imm); } true } Either::Left(VirtualOp::MOVE(dst, src)) => { if let Some(known) = known_values.resolve(src) { if known_values.resolve(dst) == Some(known.clone()) { op.opcode = Either::Left(VirtualOp::NOOP); } else { known_values.assign(dst.clone(), known); } } else { known_values.assign(dst.clone(), KnownRegValue::Eq(src.clone())); } true } _ => false, }; // If we don't know how to interpret the op, it's outputs are not known. 
if !interpreted_op { let reset = match &op.opcode { Either::Left(op) => match op { VirtualOp::ECAL(_, _, _, _) => ResetKnown::Full, // TODO: this constraint can be relaxed _ if op.has_side_effect() => ResetKnown::Full, _ => ResetKnown::Defs, }, Either::Right(op) => match op { // If this is a jump target, then multiple execution paths can lead to it, // and we can't assume to know register values. ControlFlowOp::Label(label) => { if jump_target_labels.contains_key(label) { ResetKnown::Full } else { ResetKnown::Defs } } // Jumping away doesn't invalidate state, but for calls: // TODO: `def_const_registers` doesn't contain return value, which // seems incorrect, so I'm clearing everything as a precaution ControlFlowOp::Jump { type_, .. } => match type_ { JumpType::Call => ResetKnown::Full, _ => ResetKnown::Defs, }, // These ops mark their outputs properly and cause no control-flow effects ControlFlowOp::Comment | ControlFlowOp::ConfigurablesOffsetPlaceholder | ControlFlowOp::DataSectionOffsetPlaceholder => ResetKnown::Defs, // This changes the stack pointer ControlFlowOp::PushAll(_) => ResetKnown::NonVirtual, // This can be considered to destroy all known values ControlFlowOp::PopAll(_) => ResetKnown::Full, }, }; reset.apply(op, &mut known_values); } } self } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/lib.rs
forc-plugins/forc-migrate/src/lib.rs
pub mod cli; #[macro_use] mod migrations; mod matching; mod modifying; mod visiting; use std::fmt::Display; use std::io::{self, Write}; /// Returns a single error string formed of the `error` and `instructions`. /// The returned string is formatted to be used as an error message in the [anyhow::bail] macro. fn instructive_error<E: Display, I: Display>(error: E, instructions: &[I]) -> String { let mut error_message = vec![format!("{error}")]; instructions .iter() .map(|inst| format!(" {inst}")) .for_each(|inst| error_message.push(inst)); error_message.join("\n") } /// Returns a single error string representing an internal error. /// The returned string is formatted to be used as an error message in the [anyhow::bail] macro. fn internal_error<E: Display>(error: E) -> String { instructive_error(error, &[ "This is an internal error and signifies a bug in the `forc migrate` tool.", "Please report this error by filing an issue at https://github.com/FuelLabs/sway/issues/new?template=bug_report.yml.", ]) } /// Prints a menu containing numbered `options` and asks to choose one of them. /// Returns zero-indexed index of the chosen option. fn print_single_choice_menu<S: AsRef<str> + Display>(options: &[S]) -> usize { assert!( options.len() > 1, "There must be at least two options to choose from." ); for (i, option) in options.iter().enumerate() { println!("{}. {option}", i + 1); } let mut choice = usize::MAX; while choice == 0 || choice > options.len() { print!("Enter your choice [1..{}]: ", options.len()); io::stdout().flush().unwrap(); let mut input = String::new(); choice = match std::io::stdin().read_line(&mut input) { Ok(_) => match input.trim().parse() { Ok(choice) => choice, Err(_) => continue, }, Err(_) => continue, } } choice - 1 }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/main.rs
forc-plugins/forc-migrate/src/main.rs
use anyhow::Result; fn main() -> Result<()> { forc_migrate::cli::run_cli() }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/visiting/mod.rs
forc-plugins/forc-migrate/src/visiting/mod.rs
//! This module contains common API for visiting elements in lexed and typed trees. #![allow(dead_code)] use anyhow::{bail, Ok, Result}; use itertools::Itertools; use std::sync::Arc; use duplicate::duplicate_item; use sway_ast::{ assignable::ElementAccess, expr::{LoopControlFlow, ReassignmentOp, ReassignmentOpVariant}, keywords::*, AsmBlock, Assignable, Braces, CodeBlockContents, Expr, ExprArrayDescriptor, ExprStructField, ExprTupleDescriptor, IfCondition, IfExpr, Intrinsic, ItemAbi, ItemFn, ItemImpl, ItemImplItem, ItemKind, ItemStorage, ItemStruct, ItemTrait, ItemUse, MatchBranchKind, Parens, PathExprSegment, Punctuated, Statement, StatementLet, StorageEntry, StorageField, }; use sway_core::{ decl_engine::DeclEngine, language::{ lexed::LexedModule, ty::{ TyAbiDecl, TyAstNodeContent, TyCodeBlock, TyDecl, TyExpression, TyExpressionVariant, TyFunctionDecl, TyImplSelfOrTrait, TyIntrinsicFunctionKind, TyModule, TyReassignmentTarget, TySideEffect, TySideEffectVariant, TyStorageDecl, TyStorageField, TyStructDecl, TyTraitDecl, TyTraitItem, TyUseStatement, TyVariableDecl, }, CallPath, }, Engines, TypeId, }; use sway_types::{Ident, Spanned}; use crate::{ internal_error, migrations::{DryRun, MutProgramInfo, ProgramInfo}, }; pub(crate) struct VisitingContext<'a> { /// The name of the current package being migrated. pub pkg_name: &'a str, pub engines: &'a Engines, pub dry_run: DryRun, } /// If a [TreesVisitorMut] modifies the lexed element in a way /// that its corresponding typed element becomes obsolete, /// it must communicate that by returning [InvalidateTypedElement::Yes]. pub(crate) enum InvalidateTypedElement { Yes, No, } // TODO: This is a very first, pragmatic version of the more detailed visitor pattern, // to support migrations localized in expressions, that do not need access to // a larger context. If needed, we can later provide specific `VisitingContext` // for each `visiting_...` method, that will provide additional contextual // information about the parent. 
Such and similar extensions will be driven // by the concrete need of migrations we will encounter in the future. #[duplicate_item( __TreesVisitor __ref_type(type); [TreesVisitor] [&type]; [TreesVisitorMut] [&mut type]; )] #[allow(unused_variables)] /// Represents a visitor that simultaneously traverses the elements in the lexed tree, /// mutable or immutable, and their corresponding typed elements. /// /// Due to conditional compilation, the corresponding typed elements do not necessarily /// exist. That's why they are always passed as `Option`al. /// /// A [TreesVisitorMut] can mutate lexed elements it visits. While this is far from ideal, /// it is a pragmatic design choice that still allows writing a large category of /// migrations, without developing a full-blown framework for matching, transforming, and /// rendering trees, as proposed in /// [Provide common infrastructure for writing Sway code analyzers and generators](https://github.com/FuelLabs/sway/issues/6836). /// Even just separating the traversal, marking lexed elements for change, and then changing /// them in a separate pass, would be an investment that hardly pays off only for migrations. /// /// The consequence of the fact, that the visitor can mutate the tree it traverses, requires /// invalidation of the corresponding typed element, which is handled via [InvalidateTypedElement]. /// /// Visitors can have their own state, but most of them will only want to collect [Span]s /// of occurrences to migrate. To avoid boilerplate code in visitors and support that /// most common case, all the `visit_...` methods provide a convenient mutable `output` /// argument, that can be used to collect the output of a migration step, most commonly /// the [Span]s of occurrences. 
pub(crate) trait __TreesVisitor<O> { fn visit_module( &mut self, ctx: &VisitingContext, lexed_module: __ref_type([LexedModule]), ty_module: Option<&TyModule>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_use( &mut self, ctx: &VisitingContext, lexed_use: __ref_type([ItemUse]), ty_use: Option<&TyUseStatement>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_struct_decl( &mut self, ctx: &VisitingContext, lexed_struct: __ref_type([ItemStruct]), ty_struct: Option<Arc<TyStructDecl>>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_trait_decl( &mut self, ctx: &VisitingContext, lexed_struct: __ref_type([ItemTrait]), ty_struct: Option<Arc<TyTraitDecl>>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_abi_decl( &mut self, ctx: &VisitingContext, lexed_struct: __ref_type([ItemAbi]), ty_struct: Option<Arc<TyAbiDecl>>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_fn_decl( &mut self, ctx: &VisitingContext, lexed_fn: __ref_type([ItemFn]), ty_fn: Option<Arc<TyFunctionDecl>>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_storage_decl( &mut self, ctx: &VisitingContext, lexed_fn: __ref_type([ItemStorage]), ty_fn: Option<Arc<TyStorageDecl>>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_storage_field_decl( &mut self, ctx: &VisitingContext, lexed_storage_field: __ref_type([StorageField]), ty_storage_field: Option<&TyStorageField>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_impl( &mut self, ctx: &VisitingContext, lexed_impl: __ref_type([ItemImpl]), ty_impl: Option<Arc<TyImplSelfOrTrait>>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { 
Ok(InvalidateTypedElement::No) } fn visit_block( &mut self, ctx: &VisitingContext, lexed_block: __ref_type([CodeBlockContents]), ty_block: Option<&TyCodeBlock>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_asm( &mut self, ctx: &VisitingContext, lexed_asm: __ref_type([AsmBlock]), ty_asm: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_statement_let( &mut self, ctx: &VisitingContext, lexed_let: __ref_type([StatementLet]), ty_var_decl: Option<&TyVariableDecl>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_expr( &mut self, ctx: &VisitingContext, lexed_expr: __ref_type([Expr]), ty_expr: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_if( &mut self, ctx: &VisitingContext, lexed_if: __ref_type([IfExpr]), ty_if: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } /// If the `ty_fn_call` is `None`, the `lexed_fn_call` could also be an enum instantiation, /// and not necessarily a function call. fn visit_fn_call( &mut self, ctx: &VisitingContext, lexed_fn_call: __ref_type([Expr]), ty_fn_call: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } /// Method calls can be regular method calls, like, e.g., `x.method()`, /// or contract method calls, like, e.g., `contract.method()`, or `contract.method { gas: 10000 } ()`. 
/// To extract lexed and typed information about the method call, /// use `LexedMethodCallInfo/Mut` and `TyMethodCallInfo`, respectively, fn visit_method_call( &mut self, ctx: &VisitingContext, lexed_method_call: __ref_type([Expr]), ty_method_call: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_intrinsic_call( &mut self, ctx: &VisitingContext, lexed_intrinsic_call: __ref_type([Expr]), ty_intrinsic_call: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } fn visit_enum_instantiation( &mut self, ctx: &VisitingContext, lexed_enum_instantiation: __ref_type([Expr]), ty_enum_instantiation: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } #[allow(clippy::too_many_arguments)] fn visit_reassignment( &mut self, ctx: &VisitingContext, lexed_op: __ref_type([ReassignmentOp]), lexed_lhs: __ref_type([Assignable]), ty_lhs: Option<&TyReassignmentTarget>, lexed_rhs: __ref_type([Expr]), ty_rhs: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } #[allow(clippy::too_many_arguments)] fn visit_binary_op( &mut self, ctx: &VisitingContext, op: &'static str, lexed_lhs: __ref_type([Expr]), ty_lhs: Option<&TyExpression>, lexed_rhs: __ref_type([Expr]), ty_rhs: Option<&TyExpression>, output: &mut Vec<O>, ) -> Result<InvalidateTypedElement> { Ok(InvalidateTypedElement::No) } } #[allow(dead_code)] pub(crate) struct ProgramVisitor; pub(crate) struct ProgramVisitorMut; #[duplicate_item( __ProgramVisitor __ProgramInfo __TreesVisitor __LexedMethodCallInfo __ref_type(type) __ref(value) __iter __as_ref; [ProgramVisitor] [ProgramInfo] [TreesVisitor] [LexedMethodCallInfo] [&type] [&value] [iter] [as_ref]; [ProgramVisitorMut] [MutProgramInfo] [TreesVisitorMut] [LexedMethodCallInfoMut] [&mut type] [&mut value] [iter_mut] [as_mut]; )] impl 
__ProgramVisitor { pub(crate) fn visit_program<V, O>( program_info: __ref_type([__ProgramInfo]), dry_run: DryRun, visitor: &mut V, ) -> Result<Vec<O>> where V: __TreesVisitor<O>, { let ctx = VisitingContext { #[allow(clippy::needless_borrow)] // Clippy lint false positive. Actually, a Clippy bug. pkg_name: &program_info.pkg_name, engines: program_info.engines, dry_run, }; let mut output = vec![]; Self::visit_module( &ctx, __ref([program_info.lexed_program.root]), Some(&program_info.ty_program.root_module), visitor, &mut output, )?; Ok(output) } fn visit_module<V, O>( ctx: &VisitingContext, lexed_module: __ref_type([LexedModule]), ty_module: Option<&TyModule>, visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { let ty_module = match visitor.visit_module(ctx, lexed_module, ty_module, output)? { InvalidateTypedElement::Yes => None, InvalidateTypedElement::No => ty_module, }; // We need to visit submodules separately of other items, because they // are actually stored in `lexed_modules.submodules`. for submodule in lexed_module.submodules.__iter() { let ty_submodule = ty_module.and_then(|ty_module| { ty_module .submodules .iter() .find(|ty_submodule| ty_submodule.0 == submodule.0) .map(|ty_submodule| &*ty_submodule.1.module) }); Self::visit_module( ctx, __ref([submodule.1.module]), ty_submodule, visitor, output, )?; } for annotated_item in lexed_module.tree.value.items.__iter() { match __ref([annotated_item.value]) { ItemKind::Submodule(_submodule) => { // TODO: Implement visiting `mod`. // Modules are already visited above, but we also want to // visit `mod` items, in case migrations need to inspect // or modify them. 
} ItemKind::Use(item_use) => { let ty_use = ty_module.and_then(|ty_module| { ty_module .all_nodes .iter() .find_map(|node| match &node.content { TyAstNodeContent::SideEffect(TySideEffect { side_effect: TySideEffectVariant::UseStatement(ty_use), }) if ty_use.span == item_use.span() => Some(ty_use), _ => None, }) }); visitor.visit_use(ctx, item_use, ty_use, output)?; } ItemKind::Struct(item_struct) => { let ty_struct_decl = ty_module.and_then(|ty_module| { ty_module .all_nodes .iter() .find_map(|node| match &node.content { TyAstNodeContent::Declaration(TyDecl::StructDecl( ty_struct_decl, )) => { let ty_struct_decl = ctx.engines.de().get_struct(&ty_struct_decl.decl_id); if ty_struct_decl.span == item_struct.span() { Some(ty_struct_decl) } else { None } } _ => None, }) }); visitor.visit_struct_decl(ctx, item_struct, ty_struct_decl, output)?; } ItemKind::Enum(_item_enum) => { // TODO: Implement visiting `enum`. } ItemKind::Fn(item_fn) => { let ty_fn = ty_module.and_then(|ty_module| { ty_module .all_nodes .iter() .find_map(|node| match &node.content { TyAstNodeContent::Declaration(TyDecl::FunctionDecl( function_decl, )) => { let function_decl = ctx.engines.de().get_function(&function_decl.decl_id); (function_decl.name == item_fn.fn_signature.name) .then_some(function_decl) } _ => None, }) }); Self::visit_fn_decl(ctx, item_fn, ty_fn, visitor, output)?; } ItemKind::Trait(item_trait) => { let ty_decl = ty_module.and_then(|ty_module| { ty_module .all_nodes .iter() .find_map(|node| match &node.content { TyAstNodeContent::Declaration(TyDecl::TraitDecl(trait_decl)) => { let trait_decl = ctx.engines.de().get_trait(&trait_decl.decl_id); (trait_decl.span == item_trait.span()).then_some(trait_decl) } _ => None, }) }); Self::visit_trait_decl(ctx, item_trait, ty_decl, visitor, output)?; } ItemKind::Impl(item_impl) => { let ty_impl = ty_module.and_then(|ty_module| { ty_module .all_nodes .iter() .find_map(|node| match &node.content { 
TyAstNodeContent::Declaration(TyDecl::ImplSelfOrTrait( impl_decl, )) => { let impl_decl = ctx.engines.de().get_impl_self_or_trait(&impl_decl.decl_id); (impl_decl.span == item_impl.span()).then_some(impl_decl) } _ => None, }) }); Self::visit_impl(ctx, item_impl, ty_impl, visitor, output)?; } ItemKind::Abi(item_abi) => { let ty_decl = ty_module.and_then(|ty_module| { ty_module .all_nodes .iter() .find_map(|node| match &node.content { TyAstNodeContent::Declaration(TyDecl::AbiDecl(abi_decl)) => { let abi_decl = ctx.engines.de().get_abi(&abi_decl.decl_id); (abi_decl.span == item_abi.span()).then_some(abi_decl) } _ => None, }) }); Self::visit_abi_decl(ctx, item_abi, ty_decl, visitor, output)?; } ItemKind::Const(_item_const) => { // TODO: Implement visiting `const`. } ItemKind::Storage(item_storage) => { let ty_decl = ty_module.and_then(|ty_module| { ty_module .all_nodes .iter() .find_map(|node| match &node.content { TyAstNodeContent::Declaration(TyDecl::StorageDecl( storage_decl, )) => { let storage_decl = ctx.engines.de().get_storage(&storage_decl.decl_id); // There can be only one storage declaration in the module. Some(storage_decl) } _ => None, }) }); Self::visit_storage_decl(ctx, item_storage, ty_decl, visitor, output)?; } ItemKind::Configurable(_item_configurable) => { // TODO: Implement visiting `configurable`. } ItemKind::TypeAlias(_item_type_alias) => { // TODO: Implement visiting `type`. } ItemKind::Error(_spans, _error_emitted) => { bail!(internal_error("`ItemKind::Error` cannot happen, because `forc migrate` analyzes only successfully compiled programs.")); } } } Ok(()) } fn visit_trait_decl<V, O>( ctx: &VisitingContext, lexed_trait: __ref_type([ItemTrait]), ty_trait: Option<Arc<TyTraitDecl>>, visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { let ty_trait = match visitor.visit_trait_decl(ctx, lexed_trait, ty_trait.clone(), output)? 
{ InvalidateTypedElement::Yes => None, InvalidateTypedElement::No => ty_trait, }; if let Some(trait_defs) = __ref([lexed_trait.trait_defs_opt]) { for lexed_fn in trait_defs .inner .__iter() .map(|annotated| __ref([annotated.value])) { let ty_fn = ty_trait.as_ref().and_then(|ty_trait| { ty_trait.items.iter().find_map(|item| match item { TyTraitItem::Fn(function_decl) => { let function_decl = ctx.engines.de().get_function(function_decl.id()); (function_decl.name == lexed_fn.fn_signature.name) .then_some(function_decl) } _ => None, }) }); Self::visit_fn_decl(ctx, lexed_fn, ty_fn, visitor, output)?; } } Ok(()) } fn visit_abi_decl<V, O>( ctx: &VisitingContext, lexed_abi: __ref_type([ItemAbi]), ty_abi: Option<Arc<TyAbiDecl>>, visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { let ty_abi = match visitor.visit_abi_decl(ctx, lexed_abi, ty_abi.clone(), output)? { InvalidateTypedElement::Yes => None, InvalidateTypedElement::No => ty_abi, }; if let Some(abi_defs) = __ref([lexed_abi.abi_defs_opt]) { for lexed_fn in abi_defs .inner .__iter() .map(|annotated| __ref([annotated.value])) { let ty_fn = ty_abi.as_ref().and_then(|ty_abi| { ty_abi.items.iter().find_map(|item| match item { TyTraitItem::Fn(function_decl) => { let function_decl = ctx.engines.de().get_function(function_decl.id()); (function_decl.name == lexed_fn.fn_signature.name) .then_some(function_decl) } _ => None, }) }); Self::visit_fn_decl(ctx, lexed_fn, ty_fn, visitor, output)?; } } Ok(()) } fn visit_fn_decl<V, O>( ctx: &VisitingContext, lexed_fn: __ref_type([ItemFn]), ty_fn: Option<Arc<TyFunctionDecl>>, visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { let ty_fn = match visitor.visit_fn_decl(ctx, lexed_fn, ty_fn.clone(), output)? 
{ InvalidateTypedElement::Yes => None, InvalidateTypedElement::No => ty_fn, }; Self::visit_block( ctx, __ref([lexed_fn.body.inner]), ty_fn.as_ref().map(|ty| &ty.body), visitor, output, )?; Ok(()) } fn visit_storage_decl<V, O>( ctx: &VisitingContext, lexed_storage: __ref_type([ItemStorage]), ty_storage: Option<Arc<TyStorageDecl>>, visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { let ty_storage = match visitor.visit_storage_decl(ctx, lexed_storage, ty_storage.clone(), output)? { InvalidateTypedElement::Yes => None, InvalidateTypedElement::No => ty_storage, }; let mut lexed_storage_fields = lexed_storage .entries .inner .__iter() .map(|annotated| __ref([annotated.value])) .collect_vec(); // let lexed_storage_fields = __ref([lexed_storage_fields.as_mut_slice()]); let lexed_storage_fields = lexed_storage_fields.as_mut_slice(); let ty_storage_fields = ty_storage .as_ref() .map(|ty_storage| ty_storage.fields.as_slice()) .unwrap_or(&[]); Self::visit_storage_fields_decls( ctx, lexed_storage_fields, ty_storage_fields, visitor, output, )?; Ok(()) } fn visit_storage_fields_decls<V, O>( ctx: &VisitingContext, lexed_storage_fields: &mut [__ref_type([StorageEntry])], ty_storage_fields: &[TyStorageField], visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { fn visit_storage_field_decl<V, O>( ctx: &VisitingContext, lexed_storage_entry: __ref_type([StorageEntry]), ty_storage_fields: &[TyStorageField], visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { if let Some(lexed_storage_field) = __ref([lexed_storage_entry.field]) { let ty_storage_field = ty_storage_fields .iter() .find(|ty_storage_field| ty_storage_field.span() == lexed_storage_field.span()); let ty_storage_field = match visitor.visit_storage_field_decl( ctx, lexed_storage_field, ty_storage_field, output, )? 
{ InvalidateTypedElement::Yes => None, InvalidateTypedElement::No => ty_storage_field, }; // Visit the `in` key expression, if it exists. if let Some(lexed_in_key) = __ref([lexed_storage_field.key_expr]) { let ty_in_key = ty_storage_field .and_then(|ty_storage_field| ty_storage_field.key_expression.as_ref()); __ProgramVisitor::visit_expr(ctx, lexed_in_key, ty_in_key, visitor, output)?; } // Visit the initializer expression. let ty_initializer = ty_storage_field.map(|ty_storage_field| &ty_storage_field.initializer); __ProgramVisitor::visit_expr( ctx, __ref([lexed_storage_field.initializer]), ty_initializer, visitor, output, )?; } else if let Some(namespace) = __ref([lexed_storage_entry.namespace]) { for lexed_storage_field in namespace.inner.__iter() { visit_storage_field_decl( ctx, __ref([lexed_storage_field.value]), ty_storage_fields, visitor, output, )?; } } Ok(()) } for lexed_storage_field in lexed_storage_fields.__iter() { visit_storage_field_decl(ctx, lexed_storage_field, ty_storage_fields, visitor, output)?; } Ok(()) } fn visit_impl<V, O>( ctx: &VisitingContext, lexed_impl: __ref_type([ItemImpl]), ty_impl: Option<Arc<TyImplSelfOrTrait>>, visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { let ty_impl = match visitor.visit_impl(ctx, lexed_impl, ty_impl.clone(), output)? { InvalidateTypedElement::Yes => None, InvalidateTypedElement::No => ty_impl, }; for annotated_lexed_impl_item in lexed_impl.contents.inner.__iter() { // TODO: Implement visiting `item's annotations`. 
let lexed_impl_item = __ref([annotated_lexed_impl_item.value]); match lexed_impl_item { ItemImplItem::Fn(item_fn) => { let ty_item_fn = ty_impl.as_ref().and_then(|ty_impl| { ty_impl.items.iter().find_map(|item| match item { TyTraitItem::Fn(function_decl) => { let function_decl = ctx.engines.de().get_function(function_decl.id()); (function_decl.name == item_fn.fn_signature.name) .then_some(function_decl) } _ => None, }) }); Self::visit_fn_decl(ctx, item_fn, ty_item_fn, visitor, output)?; } ItemImplItem::Const(_item_const) => { // TODO: Implement visiting `associated consts`. } ItemImplItem::Type(_trait_type) => { // TODO: Implement visiting `associated types`. } } } Ok(()) } fn visit_block<V, O>( ctx: &VisitingContext, lexed_block: __ref_type([CodeBlockContents]), ty_block: Option<&TyCodeBlock>, visitor: &mut V, output: &mut Vec<O>, ) -> Result<()> where V: __TreesVisitor<O>, { let ty_block = match visitor.visit_block(ctx, lexed_block, ty_block, output)? { InvalidateTypedElement::Yes => None, InvalidateTypedElement::No => ty_block, }; for statement in lexed_block.statements.__iter() { let ty_node = ty_block.and_then(|ty_block| { ty_block .contents .iter() .find(|ty_node| statement.span().contains(&ty_node.span)) }); match statement { Statement::Let(statement_let) => { let ty_var_decl = ty_node.map(|ty_node| match &ty_node.content { TyAstNodeContent::Declaration(ty_decl) => match ty_decl { TyDecl::VariableDecl(ty_variable_decl) => Ok(ty_variable_decl.as_ref()), _ => bail!(internal_error("`Statement::Let` must correspond to a `TyDecl::VariableDecl`.")), }, _ => bail!(internal_error("`Statement::Let` must correspond to a `TyAstNodeContent::Declaration`.")), } ).transpose()?; Self::visit_statement_let(ctx, statement_let, ty_var_decl, visitor, output)?; } Statement::Item(annotated) => { // TODO: Implement visiting `annotations`. 
match __ref([annotated.value]) { ItemKind::Use(item_use) => { let ty_use = ty_node.map(|ty_node| match &ty_node.content { TyAstNodeContent::SideEffect(ty_side_effect) => match &ty_side_effect.side_effect { TySideEffectVariant::UseStatement(ty_use) => Ok(ty_use), _ => bail!(internal_error("`ItemKind::Use` must correspond to a `TySideEffectVariant::UseStatement`.")), }, _ => bail!(internal_error("`ItemKind::Use` must correspond to a `TyAstNodeContent::SideEffect`.")), } ).transpose()?; visitor.visit_use(ctx, item_use, ty_use, output)?; } _ => { // TODO: Implement visiting `nested items`. } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
true
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/try_from_bytes_for_b256.rs
forc-plugins/forc-migrate/src/migrations/try_from_bytes_for_b256.rs
#![allow(dead_code)] use crate::{ migrations::{MutProgramInfo, Occurrence}, modifying::*, visiting::{ InvalidateTypedElement, LexedFnCallInfoMut, LexedMethodCallInfoMut, ProgramVisitorMut, TreesVisitorMut, TyFnCallInfo, TyMethodCallInfo, VisitingContext, }, }; use anyhow::{bail, Ok, Result}; use sway_ast::Expr; use sway_core::{ language::{ty::TyExpression, CallPath}, TypeInfo, }; use sway_types::{Span, Spanned}; use super::{ContinueMigrationProcess, DryRun, MigrationStep, MigrationStepKind}; // NOTE: We do not fully support cases when `b256::from` is nested within another `b256::from`. // E.g.: `b256::from(Bytes::from(b256::from(nested_bytes)))`. // In such cases, only the outermost `b256::from` will be migrated. // The same is with `Bytes::into`. // In practice, this does not happen. pub(super) const REPLACE_B256_FROM_BYTES_WITH_TRY_FROM_BYTES_STEP: MigrationStep = MigrationStep { title: "Replace `b256::from(<bytes>)` calls with `b256::try_from(<bytes>).unwrap()`", duration: 0, kind: MigrationStepKind::CodeModification( replace_b256_from_bytes_with_try_from_bytes_step, &[], ContinueMigrationProcess::IfNoManualMigrationActionsNeeded, ), help: &[ "Migration will replace all the `b256::from(<bytes>)` calls", "with `b256::try_from(<bytes>).unwrap()`.", " ", "E.g.:", " let result = b256::from(some_bytes);", "will become:", " let result = b256::try_from(some_bytes).unwrap();", ], }; pub(super) const REPLACE_BYTES_INTO_B256_WITH_TRY_INTO_B256_STEP: MigrationStep = MigrationStep { title: "Replace `<bytes>.into()` calls with `<bytes>.try_into().unwrap()`", duration: 0, kind: MigrationStepKind::CodeModification( replace_bytes_into_b256_with_try_into_b256_step, &[], ContinueMigrationProcess::IfNoManualMigrationActionsNeeded, ), help: &[ "Migration will replace all the `<bytes>.into()` calls resulting in \"b256\"", "with `<bytes>.try_into().unwrap()`.", " ", "E.g.:", " let result: b256 = some_bytes.into();", "will become:", " let result: b256 = 
some_bytes.try_into().unwrap();", ], }; fn replace_b256_from_bytes_with_try_from_bytes_step( program_info: &mut MutProgramInfo, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { struct Visitor; impl TreesVisitorMut<Occurrence> for Visitor { fn visit_fn_call( &mut self, ctx: &VisitingContext, lexed_fn_call: &mut Expr, ty_fn_call: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let lexed_fn_call_info = LexedFnCallInfoMut::new(lexed_fn_call)?; let ty_fn_call_info = ty_fn_call .map(|ty_fn_call| TyFnCallInfo::new(ctx.engines.de(), ty_fn_call)) .transpose()?; // We need the typed info in order to ensure that the `from` function // is really the `b256::from(Bytes)` function. let Some(ty_fn_call_info) = ty_fn_call_info else { return Ok(InvalidateTypedElement::No); }; let Some(implementing_for_type_id) = ty_fn_call_info.fn_decl.implementing_for else { return Ok(InvalidateTypedElement::No); }; // Note that neither the implementing for type not the trait are a // part of the `from` function call path. All associated `from` functions // in the `std::bytes` will have the same call path. // We will filter further below to target exactly the `<From<Bytes> for b256>::from`. let from_call_path = CallPath::fullpath(&["std", "bytes", "from"]); // This check is sufficient. The only `from` in `std::bytes` that // satisfies it is the `<From<Bytes> for b256>::from`. if !(ty_fn_call_info.fn_decl.call_path == from_call_path && implementing_for_type_id == ctx.engines.te().id_of_b256()) { return Ok(InvalidateTypedElement::No); } // We have found a `b256::from(Bytes)` call. 
output.push(lexed_fn_call_info.func.span().into()); if ctx.dry_run == DryRun::Yes { return Ok(InvalidateTypedElement::No); } let lexed_from_call_path = match lexed_fn_call { Expr::FuncApp { func, args: _ } => match func.as_mut() { Expr::Path(path_expr) => path_expr, _ => { bail!("`func` of the `lexed_fn_call` must be of the variant `Expr::Path`.") } }, _ => bail!("`lexed_fn_call` must be of the variant `Expr::FuncApp`."), }; // Rename the call to `from` to `try_from`. let from_ident = lexed_from_call_path.last_segment_mut(); modify(from_ident).set_name("try_from"); // The call to `try_from` becomes the target of the `unwrap` method call. let target = lexed_fn_call.clone(); let insert_span = Span::empty_at_end(&target.span()); *lexed_fn_call = New::method_call(insert_span, target, "unwrap"); Ok(InvalidateTypedElement::Yes) } } ProgramVisitorMut::visit_program(program_info, dry_run, &mut Visitor {}) } fn replace_bytes_into_b256_with_try_into_b256_step( program_info: &mut MutProgramInfo, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { struct Visitor; impl TreesVisitorMut<Occurrence> for Visitor { fn visit_method_call( &mut self, ctx: &VisitingContext, lexed_method_call: &mut Expr, ty_method_call: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let lexed_method_call_info = LexedMethodCallInfoMut::new(lexed_method_call)?; let ty_method_call_info = ty_method_call .map(|ty_method_call| TyMethodCallInfo::new(ctx.engines.de(), ty_method_call)) .transpose()?; let Some(ty_method_call_info) = ty_method_call_info else { // We need the typed info in order to ensure that the `into` function // is really the `Bytes::into(self) -> b256` function. 
return Ok(InvalidateTypedElement::No); }; let method_return_type = ctx .engines .te() .get(ty_method_call_info.fn_decl.return_type.type_id); let method_target_is_bytes_struct = match ctx .engines .te() .get(ty_method_call_info.parent_type_id) .as_ref() { TypeInfo::Struct(decl_id) => { let struct_decl = ctx.engines.de().get_struct(decl_id); struct_decl.call_path == CallPath::fullpath(&["std", "bytes", "Bytes"]) } _ => false, }; if !(ty_method_call_info.fn_decl.name.as_str() == "into" && matches!(method_return_type.as_ref(), TypeInfo::B256) && method_target_is_bytes_struct) { return Ok(InvalidateTypedElement::No); } // We have found a `Bytes::into(self) -> b256` call. output.push(lexed_method_call_info.path_seg.span().into()); if ctx.dry_run == DryRun::Yes { return Ok(InvalidateTypedElement::No); } // Rename the call to `into` to `try_into`. modify(lexed_method_call_info.path_seg).set_name("try_into"); // The call to `try_into` becomes the target of the `unwrap` method call. let target = lexed_method_call.clone(); let insert_span = Span::empty_at_end(&target.span()); *lexed_method_call = New::method_call(insert_span, target, "unwrap"); Ok(InvalidateTypedElement::Yes) } } ProgramVisitorMut::visit_program(program_info, dry_run, &mut Visitor {}) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/partial_eq.rs
forc-plugins/forc-migrate/src/migrations/partial_eq.rs
#![allow(dead_code)] #![allow(deprecated)] use std::vec; use crate::{ internal_error, matching::{ item_impl, lexed_match, lexed_match_mut, literal, ty_match, with_name_mut, LexedLocateAnnotatedMut, LexedLocateAsAnnotatedMut, }, migrations::{ visit_all_modules, visit_all_modules_mut, visit_modules, InteractionResponse, MutProgramInfo, Occurrence, }, modifying::*, print_single_choice_menu, }; use anyhow::{bail, Ok, Result}; use itertools::Itertools; use sway_ast::{ItemKind, Module}; use sway_core::{ language::{ty::TyModule, CallPath}, Engines, }; use sway_error::formatting::{plural_s, Indent}; use sway_types::{Ident, Span, Spanned}; use super::{ContinueMigrationProcess, DryRun, MigrationStep, MigrationStepKind, ProgramInfo}; // NOTE: We do not support cases when `Eq` is given another name via `as` alias import. // In practice, this does not happen. // NOTE: We do not support cases when `Eq` is implemented locally within a function. // In practice, this does not happen. // NOTE: We are searching only for standalone `#[cfg(experimental_partial_eq)]` attributes. // For those, we can assume that they are generated using the migration tool, or that // early adoption wasn't using complex patterns that request additional effort in // migration steps. // E.g., if we encounter something like: // #[allow(dead_code), cfg(experimental_references = true, experimental_partial_eq = true)] // we will assume that developers want to have control over the feature adoption and // will not consider such usages in the migration. // NOTE: We could add an additional migration step that suggests inspecting usages of // the `Eq` trait in trait constraints, to see if they could be replaced with `PartialEq`. // The development effort of this additional step is questionable. We would need to extend // visitors to collect all trait constraints, which is a considerable effort. 
On the other // hand the current types that are constrained all have `Eq` semantics, which is not // changed by the introduction of `PartialEq`. Changing `Eq` constraint to `PartialEq` // to lower the constraint is done in the `std`, where appropriate. // Suggesting to developers doing this replacement in their projects is mentioned // in the tracking issue: https://github.com/FuelLabs/sway/issues/6883. pub(super) const IMPLEMENT_EXPERIMENTAL_PARTIAL_EQ_AND_EQ_TRAITS: MigrationStep = MigrationStep { title: "Implement experimental `PartialEq` and `Eq` traits", duration: 1, kind: MigrationStepKind::CodeModification( implement_experimental_partial_eq_and_eq_traits, &[], // This is an intermediate migration for early adopting the feature. ContinueMigrationProcess::Never, ), help: &[ "Migration will implement `PartialEq` and `Eq` traits for every type", "that implements `Eq` trait.", " ", "The new implementations will be marked as `#[cfg(experimental_partial_eq = true)]`.", " ", "The original `Eq` implementation will remain in code and be marked as", "`#[cfg(experimental_partial_eq = false)]`.", ], }; pub(super) const REMOVE_DEPRECATED_EQ_TRAIT_IMPLEMENTATIONS: MigrationStep = MigrationStep { title: "Remove deprecated `Eq` trait implementations and `experimental_partial_eq` attributes", duration: 1, kind: MigrationStepKind::Interaction( remove_deprecated_eq_trait_implementations_instruction, remove_deprecated_eq_trait_implementations_interaction, &[], ContinueMigrationProcess::IfNoManualMigrationActionsNeeded, ), help: &[ "Migration will:", " - remove deprecated `Eq` implementations.", " - remove the `#[cfg(experimental_partial_eq = true)]` attributes from the new `Eq`", " and `PartialEq` implementations.", " ", "Run this migration only if you are switching fully to the `partial_eq` feature,", "and do not plan to use the old `Eq` implementations anymore.", ], }; const EXPERIMENTAL_PARTIAL_EQ_ATTRIBUTE: &str = "experimental_partial_eq"; fn 
remove_deprecated_eq_trait_implementations_instruction( program_info: &ProgramInfo, ) -> Result<Vec<Occurrence>> { fn remove_deprecated_eq_trait_implementations_instruction_impl( _engines: &Engines, module: &Module, _ty_module: &TyModule, _dry_run: DryRun, ) -> Result<Vec<Occurrence>> { let mut result = vec![]; // Note that the typed program, depending if the `forc migrate` was run with or // without the `partial_eq` flag, might or might not have the deprecated implementations // represented in the typed tree. // This is not an issue, because if, in the lexed tree, we find a trait impl of the trait // named `Eq` that has the `#[cfg(experimental_partial_eq = false)]` attribute, this is // enough to identify it as a deprecated `Eq` trait implementation. // The deprecated `Eq` implementation: // - has the `eq` method in the body, so it must not be empty, // - is annotated with `#[cfg(experimental_partial_eq = false)]`. result.append( &mut find_trait_impl( module, "Eq", false, false, ResultSpanSource::ImplTraitDefinition, ) .iter() .map(|span| span.clone().into()) .collect(), ); // The new `Eq` implementation: // - has an empty impl. // - is annotated with `#[cfg(experimental_partial_eq = true)]`. result.append( &mut find_trait_impl(module, "Eq", true, true, ResultSpanSource::CfgAttribute) .iter() .map(|span| span.clone().into()) .collect(), ); // The new `PartialEq` implementation: // - has the `eq` method in the body, so it must not be empty, // - is annotated with `#[cfg(experimental_partial_eq = true)]`. 
result.append( &mut find_trait_impl( module, "PartialEq", false, true, ResultSpanSource::CfgAttribute, ) .iter() .map(|span| span.clone().into()) .collect(), ); Ok(result) } let res = visit_all_modules( program_info, DryRun::Yes, remove_deprecated_eq_trait_implementations_instruction_impl, )?; Ok(res.into_iter().flatten().collect()) } fn remove_deprecated_eq_trait_implementations_interaction( program_info: &mut MutProgramInfo, ) -> Result<(InteractionResponse, Vec<Occurrence>)> { /// Calculates and returns: /// - number of deprecated `Eq` impls to remove, /// - number of `#[cfg(experimental_partial_eq = true)]` to remove from new `Eq` impls, /// - number of `#[cfg(experimental_partial_eq = true)]` to remove from new `PartialEq` impls. fn calculate_number_of_code_removals( _engines: &Engines, module: &Module, _ty_module: &TyModule, _dry_run: DryRun, ) -> Result<Vec<(usize, usize, usize)>> { // We will conveniently reuse the `find_trait_impl` function here. let num_of_deprecated_eq_impls = find_trait_impl( module, "Eq", false, false, ResultSpanSource::ImplTraitDefinition, ) .len(); let num_of_cfg_attrs_on_new_eq = find_trait_impl(module, "Eq", true, true, ResultSpanSource::CfgAttribute).len(); let num_of_cfg_attrs_on_new_partial_eq = find_trait_impl( module, "PartialEq", false, true, ResultSpanSource::CfgAttribute, ) .len(); Ok(vec![( num_of_deprecated_eq_impls, num_of_cfg_attrs_on_new_eq, num_of_cfg_attrs_on_new_partial_eq, )]) } let numbers_of_code_removals_per_module = visit_modules( program_info.engines, &program_info.lexed_program.root, &program_info.ty_program.root_module, DryRun::Yes, calculate_number_of_code_removals, )?; let ( num_of_deprecated_eq_impls, num_of_cfg_attrs_on_new_eq, num_of_cfg_attrs_on_new_partial_eq, ) = numbers_of_code_removals_per_module .into_iter() .flatten() .fold((0, 0, 0), |acc, counts| { (acc.0 + counts.0, acc.1 + counts.1, acc.2 + counts.2) }); if num_of_deprecated_eq_impls == 0 && num_of_cfg_attrs_on_new_eq == 0 && 
num_of_cfg_attrs_on_new_partial_eq == 0 { return Ok((InteractionResponse::None, vec![])); } println!("The following code will be removed:"); if num_of_deprecated_eq_impls > 0 { println!( "{}- {} deprecated `Eq` implementation{}.", Indent::Single, num_of_deprecated_eq_impls, plural_s(num_of_deprecated_eq_impls) ); } if num_of_cfg_attrs_on_new_eq > 0 { println!("{}- {} `#[cfg(experimental_partial_eq = true)]` attributes from new `Eq` implementation{}.", Indent::Single, num_of_cfg_attrs_on_new_eq, plural_s(num_of_cfg_attrs_on_new_eq)); } if num_of_cfg_attrs_on_new_partial_eq > 0 { println!("{}- {} `#[cfg(experimental_partial_eq = true)]` attributes from new `PartialEq` implementation{}.", Indent::Single, num_of_cfg_attrs_on_new_partial_eq, plural_s(num_of_cfg_attrs_on_new_partial_eq)); } println!(); println!("Do you want to remove that code and switch fully to the `partial_eq` feature?"); println!(); if print_single_choice_menu(&[ "Yes, remove the code and switch fully to the `partial_eq` feature.", "No, continue using deprecated `Eq` and the new `PartialEq` and `Eq` side-by-side.", ]) != 0 { return Ok((InteractionResponse::PostponeStep, vec![])); } // Execute the migration step. let mut result = vec![]; fn remove_deprecated_eq_impls( _engines: &Engines, module: &mut Module, _ty_module: &TyModule, _dry_run: DryRun, ) -> Result<Vec<Occurrence>> { let mut occurrences_of_annotated_eq_impls_to_remove: Vec<Occurrence> = vec![]; // Deprecated `Eq` impls must not be empty, they have the `eq` method. let annotated_eq_impls = lexed_match::impl_self_or_trait_decls_annotated(module).filter(|annotated| { matches!(&annotated.value, ItemKind::Impl(item_impl) if item_impl::implements_trait("Eq")(&item_impl) && !item_impl.contents.inner.is_empty() ) }); // For every empty `Eq` trait implementation... for annotated_eq_impl in annotated_eq_impls { // Check if the `#[cfg(experimental_partial_eq = false)]` attribute exists. 
if lexed_match::cfg_attribute_standalone_single_arg( &annotated_eq_impl.attributes, EXPERIMENTAL_PARTIAL_EQ_ATTRIBUTE, |arg| arg.as_ref().is_some_and(literal::is_bool_false), ) .is_none() { continue; }; // The trait impl passes all conditions, mark it for removal. occurrences_of_annotated_eq_impls_to_remove.push(annotated_eq_impl.span().into()); } for annotated_eq_impl_occurrence in occurrences_of_annotated_eq_impls_to_remove.iter() { modify(module).remove_annotated_item(&annotated_eq_impl_occurrence.span); } Ok(occurrences_of_annotated_eq_impls_to_remove) } let res = visit_all_modules_mut(program_info, DryRun::No, remove_deprecated_eq_impls)?; result.append(&mut res.into_iter().flatten().collect()); fn remove_cfg_experimental_partial_eq_true_attributes( _engines: &Engines, module: &mut Module, _ty_module: &TyModule, _dry_run: DryRun, ) -> Result<Vec<Occurrence>> { let mut occurrences_of_cfg_attributes_to_remove: Vec<Occurrence> = vec![]; // Find new `Eq` and `PartialEq` impls. let annotated_trait_impls = lexed_match_mut::impl_self_or_trait_decls_annotated(module) .filter_map(|annotated| if matches!(&annotated.value, ItemKind::Impl(item_impl) // New `Eq` impl must be empty, and `PartialEq` not, it has the `eq` method. if item_impl::implements_trait("Eq")(&item_impl) && item_impl.contents.inner.is_empty() || item_impl::implements_trait("PartialEq")(&item_impl) && !item_impl.contents.inner.is_empty()) { Some(annotated) } else { None } ) .collect_vec(); // For every `Eq` and `PartialEq` trait implementation... for annotated_trait_impl in annotated_trait_impls.iter() { // Check if the `#[cfg(experimental_partial_eq = true)]` attribute exists. let Some(cfg_experimental_partial_eq_attr) = lexed_match::cfg_attribute_standalone_single_arg( &annotated_trait_impl.attributes, EXPERIMENTAL_PARTIAL_EQ_ATTRIBUTE, |arg| arg.as_ref().is_some_and(literal::is_bool_true), ) else { continue; }; // The trait passes all the conditions, mark the `cfg` attribute for removal. 
occurrences_of_cfg_attributes_to_remove .push(cfg_experimental_partial_eq_attr.span().into()); } for annotated_trait_impl in annotated_trait_impls { for cfg_attribute_occurrence in occurrences_of_cfg_attributes_to_remove.iter() { modify(annotated_trait_impl) .remove_attribute_decl_containing_attribute(&cfg_attribute_occurrence.span); } } Ok(occurrences_of_cfg_attributes_to_remove) } let res = visit_all_modules_mut( program_info, DryRun::No, remove_cfg_experimental_partial_eq_true_attributes, )?; result.append(&mut res.into_iter().flatten().collect()); Ok((InteractionResponse::ExecuteStep, result)) } enum ResultSpanSource { ImplTraitDefinition, CfgAttribute, } /// Searches for impls of the trait named `trait_name` within the `module`. /// The trait impl must either be empty, or have content, depending on `is_empty_impl`. /// The trait impl must have the `#[cfg(experimental_partial_eq)]` set to bool defined in `cfg_experimental_partial_eq`. /// The resulting [Span] points either to the trait impl definition (without the where clause and the content), /// or to the `cfg` attribute. fn find_trait_impl( module: &Module, trait_name: &str, is_empty_impl: bool, cfg_experimental_partial_eq: bool, result_span_source: ResultSpanSource, ) -> Vec<Span> { let mut result = vec![]; // Find impls of the trait given by the `trait_name`. let attributed_eq_trait_impls = lexed_match::impl_self_or_trait_decls_annotated(module) .filter_map(|annotated| match &annotated.value { ItemKind::Impl(item_impl) if item_impl::implements_trait(trait_name)(&item_impl) => { Some((&annotated.attributes, item_impl)) } _ => None, }); // For every trait implementation... for (attributes, eq_trait_impl) in attributed_eq_trait_impls { // Check if the impl body is empty or not, and same as expected. if eq_trait_impl.contents.inner.is_empty() != is_empty_impl { continue; } // Check if the `#[cfg(experimental_partial_eq)]` attribute exists and is set to `cfg_experimental_partial_eq`. 
let expected_bool_literal = if cfg_experimental_partial_eq { literal::is_bool_true } else { literal::is_bool_false }; let Some(cfg_experimental_partial_eq_attr) = lexed_match::cfg_attribute_standalone_single_arg( attributes, EXPERIMENTAL_PARTIAL_EQ_ATTRIBUTE, |arg| arg.as_ref().is_some_and(expected_bool_literal), ) else { continue; }; // The trait passes all the conditions, add it to the result. let result_span = match result_span_source { ResultSpanSource::ImplTraitDefinition => { Span::join(eq_trait_impl.impl_token.span(), &eq_trait_impl.ty.span()) } ResultSpanSource::CfgAttribute => cfg_experimental_partial_eq_attr.span(), }; result.push(result_span); } result } fn implement_experimental_partial_eq_and_eq_traits( program_info: &mut MutProgramInfo, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { fn implement_experimental_partial_eq_and_eq_traits_impl( engines: &Engines, lexed_module: &mut Module, ty_module: &TyModule, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { let mut result = vec![]; let std_ops_eq_call_path = CallPath::fullpath(&["std", "ops", "Eq"]); let ty_impl_traits = ty_match::impl_self_or_trait_decls(ty_module) .map(|decl| engines.de().get_impl_self_or_trait(decl)) .filter(|decl| decl.is_impl_trait()) .collect_vec(); for ty_impl_trait in ty_impl_traits { let implemented_trait = engines.de().get_trait( &ty_impl_trait .implemented_trait_decl_id() .expect("impl is a trait impl"), ); // Further inspect only `Eq` impls. if implemented_trait.call_path != std_ops_eq_call_path { continue; } let Some((attributes, lexed_impl_eq_trait)) = lexed_module.locate_annotated_mut(&ty_impl_trait) else { bail!(internal_error( "Lexical trait implementation cannot be found." )); }; // If the impl already has `experimental_partial_eq` attribute set, we assume that the migration // is already done for this impl. Note that we don't check if it is set to true or false. 
// Just the existence of the attribute, being on the old `Eq` (false), or the new `Eq` (true), // indicates that the `partial_eq` migrations are done for this occurrence, or that developers // manually early adopted the feature. if lexed_match_mut::cfg_attribute_arg( attributes, with_name_mut(EXPERIMENTAL_PARTIAL_EQ_ATTRIBUTE), ) .is_some() { continue; }; // Check that this is the old `Eq` implementation, with the `eq` method. // If it is the new, empty one, skip it. if lexed_impl_eq_trait.contents.inner.is_empty() { continue; } result.push( Span::join( lexed_impl_eq_trait.impl_token.span(), &lexed_impl_eq_trait.ty.span(), ) .into(), ); if dry_run == DryRun::Yes { continue; } // No dry run, perform the changes. // 1. Append the `cfg[experimental_partial_eq = false]` to the existing attributes. let insert_span = if attributes.is_empty() { Span::empty_at_start(&lexed_impl_eq_trait.span()) } else { Span::empty_at_end(&attributes.last().expect("attributes are not empty").span()) }; let cfg_attribute_decl = New::cfg_experimental_attribute_decl(insert_span.clone(), "partial_eq", false); attributes.push(cfg_attribute_decl); // 2. Insert the `PartialEq` and new empty `Eq` implementation. let Some(annotated_impl_eq_trait) = lexed_module.locate_as_annotated_mut(&ty_impl_trait) else { bail!(internal_error( "Annotated lexical trait implementation cannot be found." )); }; let mut annotated_impl_partial_eq_trait = annotated_impl_eq_trait.clone(); // Set the `experimental_partial_eq` attribute to true. let Some(experimental_partial_eq_arg) = lexed_match_mut::cfg_attribute_arg( &mut annotated_impl_partial_eq_trait.attributes, with_name_mut(EXPERIMENTAL_PARTIAL_EQ_ATTRIBUTE), ) else { bail!(internal_error( "Attribute \"experimental_partial_eq\" cannot be found." )); }; experimental_partial_eq_arg.value = Some(New::literal_bool(insert_span, true)); // Define the new `Eq` trait simply by removing the content form the `PartialEq` trait. 
let mut annotated_impl_new_eq_trait = annotated_impl_partial_eq_trait.clone(); let ItemKind::Impl(impl_new_eq_trait) = &mut annotated_impl_new_eq_trait.value else { bail!(internal_error( "Annotated implementation of \"Eq\" trait is not an `Item::Impl`." )); }; impl_new_eq_trait.contents.inner.clear(); // Rename the `Eq` to `PartialEq` in the new `PartialEq` trait. let ItemKind::Impl(impl_partial_eq_trait) = &mut annotated_impl_partial_eq_trait.value else { bail!(internal_error( "Annotated implementation of \"Eq\" trait is not an `Item::Impl`." )); }; let path_type_last_ident = impl_partial_eq_trait .trait_opt .as_mut() .expect("impl implements `Eq` trait") .0 .last_segment_mut(); path_type_last_ident.name = Ident::new_with_override("PartialEq".into(), path_type_last_ident.name.span()); // If the original `Eq` impl had `Eq`s in trait constraints, replace those with `PartialEq`. let eq_trait_constraints = lexed_match_mut::trait_constraints(impl_partial_eq_trait, with_name_mut("Eq")); for eq_trait_constraint in eq_trait_constraints { let path_type_last_ident = eq_trait_constraint.last_segment_mut(); path_type_last_ident.name = Ident::new_with_override("PartialEq".into(), path_type_last_ident.name.span()); } // Add the new trait impls to the items. // let mut module_modifier = Modifier::new(lexed_module); // module_modifier modify(lexed_module) // Inserting in reverse order so that `PartialEq` ends up before `Eq`, // since they have the same span start which equals the span of the original `Eq`. .insert_annotated_item_after(annotated_impl_new_eq_trait) .insert_annotated_item_after(annotated_impl_partial_eq_trait); // Note that we do not need to adjust the `use` statements to include `PartialEq`. // All `std::ops` are a part of the std's prelude. If there was a `use Eq` // in a modified file, it was actually not needed. 
} Ok(result) } let res = visit_all_modules_mut( program_info, dry_run, implement_experimental_partial_eq_and_eq_traits_impl, )?; Ok(res.into_iter().flatten().collect()) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/merge_core_std.rs
forc-plugins/forc-migrate/src/migrations/merge_core_std.rs
#![allow(dead_code)] use std::{sync::Arc, vec}; use crate::{ migrations::{InteractionResponse, MutProgramInfo, Occurrence}, print_single_choice_menu, visiting::{ InvalidateTypedElement, LexedFnCallInfo, LexedFnCallInfoMut, ProgramVisitor, ProgramVisitorMut, TreesVisitor, TreesVisitorMut, VisitingContext, }, }; use anyhow::{Ok, Result}; use sway_ast::{Expr, ItemImpl, ItemUse, UseTree}; use sway_core::language::ty::{TyExpression, TyImplSelfOrTrait, TyUseStatement}; use sway_types::{Ident, Spanned}; use super::{ContinueMigrationProcess, DryRun, MigrationStep, MigrationStepKind, ProgramInfo}; pub(super) const REPLACE_CORE_WITH_STD_IN_PATHS: MigrationStep = MigrationStep { title: "Replace `core` with `std` in paths", duration: 1, kind: MigrationStepKind::Interaction( replace_core_with_std_in_paths_instruction, replace_core_with_std_in_paths_interaction, &[], ContinueMigrationProcess::IfNoManualMigrationActionsNeeded, ), help: &[ "Migration will replace all occurrences of `core` with `std` in paths.", " ", "E.g.:", " use core::ops::*;", "will become:", " use std::ops::*;", " ", "Run this migration only if you are switching fully to the `merge_core_std` feature,", "and do not plan to use the old, separated, standard libraries anymore.", ], }; fn replace_core_with_std_in_paths_instruction( program_info: &ProgramInfo, ) -> Result<Vec<Occurrence>> { struct Visitor; impl TreesVisitor<Occurrence> for Visitor { fn visit_use( &mut self, _ctx: &VisitingContext, lexed_use: &ItemUse, _ty_use: Option<&TyUseStatement>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let path_prefix = match &lexed_use.tree { UseTree::Path { prefix, .. 
} => Some(prefix), _ => None, }; let Some(path_prefix) = path_prefix else { return Ok(InvalidateTypedElement::No); }; if lexed_use.root_import.is_none() && path_prefix.as_str() == "core" { output.push(path_prefix.span().into()); } Ok(InvalidateTypedElement::No) } fn visit_impl( &mut self, _ctx: &VisitingContext, lexed_impl: &ItemImpl, _ty_impl: Option<Arc<TyImplSelfOrTrait>>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let Some((trait_path, _for_token)) = &lexed_impl.trait_opt else { return Ok(InvalidateTypedElement::No); }; if trait_path.root_opt.is_none() && trait_path.prefix.name.as_str() == "core" { output.push(trait_path.prefix.span().into()); } Ok(InvalidateTypedElement::No) } fn visit_fn_call( &mut self, _ctx: &VisitingContext, lexed_fn_call: &Expr, _ty_fn_call: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let lexed_fn_call_info = LexedFnCallInfo::new(lexed_fn_call)?; let fn_path = match lexed_fn_call_info.func { Expr::Path(path) => Some(path), _ => None, }; let Some(fn_path) = fn_path else { return Ok(InvalidateTypedElement::No); }; if fn_path.root_opt.is_none() && !fn_path.suffix.is_empty() && fn_path.prefix.name.as_str() == "core" { output.push(fn_path.prefix.span().into()); } Ok(InvalidateTypedElement::No) } } ProgramVisitor::visit_program(program_info, DryRun::Yes, &mut Visitor {}) } fn replace_core_with_std_in_paths_interaction( program_info: &mut MutProgramInfo, ) -> Result<(InteractionResponse, Vec<Occurrence>)> { println!("All the occurrences of `core` shown above will be replaced with `std`."); println!(); println!("Do you want to replace those occurrences and switch fully to the `merge_core_std` feature?"); println!(); if print_single_choice_menu(&[ "Yes, replace `core` with `std` and switch fully to the `merge_core_std` feature.", "No, continue using `core` and `std` as separated libraries.", ]) != 0 { return Ok((InteractionResponse::PostponeStep, vec![])); } // Execute the 
migration step. struct Visitor; impl TreesVisitorMut<Occurrence> for Visitor { // In all of the cases, we keep the old span and just override the name. fn visit_use( &mut self, _ctx: &VisitingContext, lexed_use: &mut ItemUse, _ty_use: Option<&TyUseStatement>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let path_prefix = match &mut lexed_use.tree { UseTree::Path { prefix, .. } => Some(prefix), _ => None, }; let Some(path_prefix) = path_prefix else { return Ok(InvalidateTypedElement::No); }; if lexed_use.root_import.is_none() && path_prefix.as_str() == "core" { output.push(path_prefix.span().into()); *path_prefix = Ident::new_with_override("std".to_string(), path_prefix.span()); } Ok(InvalidateTypedElement::Yes) } fn visit_impl( &mut self, _ctx: &VisitingContext, lexed_impl: &mut ItemImpl, _ty_impl: Option<Arc<TyImplSelfOrTrait>>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let Some((trait_path, _for_token)) = &mut lexed_impl.trait_opt else { return Ok(InvalidateTypedElement::No); }; if trait_path.root_opt.is_none() && trait_path.prefix.name.as_str() == "core" { output.push(trait_path.prefix.span().into()); trait_path.prefix.name = Ident::new_with_override("std".to_string(), trait_path.prefix.span()); } Ok(InvalidateTypedElement::Yes) } fn visit_fn_call( &mut self, _ctx: &VisitingContext, lexed_fn_call: &mut Expr, _ty_fn_call: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let lexed_fn_call_info = LexedFnCallInfoMut::new(lexed_fn_call)?; let fn_path = match lexed_fn_call_info.func { Expr::Path(path) => Some(path), _ => None, }; let Some(fn_path) = fn_path else { return Ok(InvalidateTypedElement::No); }; if fn_path.root_opt.is_none() && !fn_path.suffix.is_empty() && fn_path.prefix.name.as_str() == "core" { output.push(fn_path.prefix.span().into()); fn_path.prefix.name = Ident::new_with_override("std".to_string(), fn_path.prefix.span()); } Ok(InvalidateTypedElement::Yes) } } 
ProgramVisitorMut::visit_program(program_info, DryRun::No, &mut Visitor {}) .map(|result| (InteractionResponse::ExecuteStep, result)) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/storage_domains.rs
forc-plugins/forc-migrate/src/migrations/storage_domains.rs
#![allow(dead_code)] use std::collections::HashSet; use super::{ContinueMigrationProcess, MigrationStep, MigrationStepKind, MutProgramInfo}; use crate::{ internal_error, matching::{ lexed_match_mut, lexed_storage_field, ty_match, ty_storage_field::{with_in_keyword, without_in_keyword}, TyLocate, }, migrations::{InteractionResponse, Occurrence, ProgramInfo}, modifying::*, print_single_choice_menu, }; use anyhow::{bail, Ok, Result}; use itertools::Itertools; use num_bigint::BigUint; use sha2::{Digest, Sha256}; use sway_core::language::{ ty::{TyExpressionVariant, TyStorageField}, CallPath, Literal, }; use sway_error::formatting::{self, sequence_to_list}; use sway_types::Spanned; pub(super) const REVIEW_STORAGE_SLOT_KEYS_STEP: MigrationStep = MigrationStep { title: "Review explicitly defined slot keys in storage declarations (`in` keywords)", duration: 2, kind: MigrationStepKind::Instruction(review_storage_slot_keys_step), help: &[ "If the slot keys used in `in` keywords represent keys generated for `storage` fields", "by the Sway compiler, those keys might need to be recalculated.", " ", "The previous formula for calculating storage keys was: `sha256(\"storage.<field name>\")`.", "The new formula is: `sha256((0u8, \"storage.<field name>\"))`.", ], }; pub(super) const DEFINE_BACKWARD_COMPATIBLE_STORAGE_SLOT_KEYS_STEP: MigrationStep = MigrationStep { title: "Explicitly define storage slot keys if they need to be backward compatible", // We will be pointing to the storage declaration and offer automatic migration. // In case of a suggestion the manual effort will be reviewing the purpose of the // contract, which we will approximate with 10 minutes. 
duration: 10, kind: MigrationStepKind::Interaction( define_backward_compatible_storage_slot_keys_step_instruction, define_backward_compatible_storage_slot_keys_step_interaction, &[], ContinueMigrationProcess::IfNoManualMigrationActionsNeeded, ), help: &[ "If the contract owning this storage is behind a proxy, or for any other reason needs", "to use previous storage slot keys, those keys must be explicitly assigned to the", "storage fields by using the `in` keyword.", " ", "E.g.:", " storage {", " field in <previous slot key>: u64 = 0,", " }", " ", "The previous formula for calculating storage keys was: `sha256(\"storage.<field name>\")`.", "The new formula is: `sha256((0u8, \"storage.<field name>\"))`.", ], }; fn review_storage_slot_keys_step(program_info: &ProgramInfo) -> Result<Vec<Occurrence>> { let mut res = vec![]; let Some(storage_decl_id) = ty_match::storage_decl(program_info.ty_program.as_ref()) else { return Ok(res); }; let storage_decl = &*program_info.engines.de().get_storage(&storage_decl_id); let well_known_slot_keys = get_well_known_slot_keys(); let well_known_slot_keys_constants = get_well_known_slot_keys_constants(); for (storage_field, key_expression) in ty_match::storage_fields_deep(storage_decl, with_in_keyword) .iter() .map(|sf| { ( sf, sf.key_expression .as_ref() .expect("storage key has in keyword"), ) }) { // If the key expression represents a well known slot defined in // Sway Standards or Sway Libraries do not suggest to check it. let is_well_known_slot_key = match &key_expression.expression { TyExpressionVariant::Literal(Literal::B256(slot_key)) => { well_known_slot_keys.contains(&BigUint::from_bytes_be(slot_key.as_slice())) } TyExpressionVariant::ConstantExpression { call_path: Some(call_path), .. 
} => well_known_slot_keys_constants.contains(call_path), _ => false, }; if is_well_known_slot_key { continue; } // If the storage fields are behind a proxy, and must contain the backwards compatibility, // the next migration, will assign them the slots calculated by the previous algorithm. // // If we see that the `in` keyword assigns a literal that corresponds to the slot calculated // by the previous algorithm, we recognize it as backwards compatibility and do not suggest to // review the slot. let is_backward_compatibility_slot_key = match &key_expression.expression { TyExpressionVariant::Literal(Literal::B256(slot_key)) => { slot_key == &get_previous_slot_key(storage_field) } _ => false, }; if is_backward_compatibility_slot_key { continue; } res.push(key_expression.span.clone().into()); } Ok(res) } fn define_backward_compatible_storage_slot_keys_step_instruction( program_info: &ProgramInfo, ) -> Result<Vec<Occurrence>> { let mut res = vec![]; let Some(storage_decl_id) = ty_match::storage_decl(program_info.ty_program.as_ref()) else { return Ok(res); }; let storage_decl = &*program_info.engines.de().get_storage(&storage_decl_id); // It is hard to have any better heuristic here. Essentially, every contract // could be behind a proxy and we do not have a mean to detected that. // So, we will provide the suggestion if the storage has any fields without the `in` keyword. // The suggestion is shown only once on the entire `storage` declaration, // to avoid cluttering. The interaction part of the step will then provide // more detailed information and guide the developers. 
if !ty_match::storage_fields_deep(storage_decl, without_in_keyword).is_empty() { res.push(storage_decl.span.clone().into()); } Ok(res) } fn define_backward_compatible_storage_slot_keys_step_interaction( program_info: &mut MutProgramInfo, ) -> Result<(InteractionResponse, Vec<Occurrence>)> { let Some(storage_decl_id) = ty_match::storage_decl(program_info.ty_program) else { return Ok((InteractionResponse::None, vec![])); }; let storage_decl = &*program_info.engines.de().get_storage(&storage_decl_id); let storage_fields_without_in_keyword = ty_match::storage_fields_deep(storage_decl, without_in_keyword); println!( "The following storage fields will have slot keys calculated by using the new formula:" ); sequence_to_list( &storage_fields_without_in_keyword .iter() .map(|field| field.full_name()) .collect_vec(), formatting::Indent::Single, 10, ) .iter() .for_each(|field_full_name| println!("{field_full_name}")); println!(); println!("Do you want these fields to have backward compatible storage slot keys, calculated"); println!("by using the previous formula?"); println!(); println!("If yes, this migration step will insert `in` keywords to all of the above fields,"); println!("and calculate the storage slot keys by using the previous formula."); println!(); if print_single_choice_menu(&[ "Yes, assign the backward compatible storage slot keys.", "No, this contract does not require backwards compatibility.", ]) != 0 { return Ok((InteractionResponse::StepNotNeeded, vec![])); } // Execute the migration step. let mut res = vec![]; let Some(storage_declaration) = lexed_match_mut::storage_decl(program_info.lexed_program) else { bail!(internal_error( "Lexical storage declaration cannot be found." 
)); }; for lexed_storage_field in lexed_match_mut::storage_fields_deep( storage_declaration, lexed_storage_field::without_in_keyword, ) { let Some(ty_storage_field) = storage_decl.locate(lexed_storage_field) else { bail!(internal_error(format!( "Typed storage field \"{}\" cannot be found.", lexed_storage_field.name ))); }; res.push(ty_storage_field.name.span().into()); modify(lexed_storage_field).set_in_key(BigUint::from_bytes_be( get_previous_slot_key(ty_storage_field).as_slice(), )); } Ok((InteractionResponse::ExecuteStep, res)) } /// Returns storage slot keys defined in Sway Standards and Sway Libraries, /// as [BigUint]s that represents `b256` storage addresses. fn get_well_known_slot_keys() -> HashSet<BigUint> { // For SRC14 well-known slot keys see: https://docs.fuel.network/docs/sway-libs/upgradability/#upgradability-library let src14_target = BigUint::parse_bytes( b"7bb458adc1d118713319a5baa00a2d049dd64d2916477d2688d76970c898cd55", 16, ) .unwrap(); let src14_proxy_owner = BigUint::parse_bytes( b"bb79927b15d9259ea316f2ecb2297d6cc8851888a98278c0a2e03e1a091ea754", 16, ) .unwrap(); HashSet::from_iter(vec![src14_target, src14_proxy_owner]) } /// Returns [CallPath]s of constants that hold storage slot keys /// defined in Sway Standards and Sway Libraries. fn get_well_known_slot_keys_constants() -> HashSet<CallPath> { let slot_keys_constants = vec![ // For SRC14 well-known slot keys see: https://docs.fuel.network/docs/sway-libs/upgradability/#upgradability-library ["sway_libs", "upgradability", "PROXY_OWNER_STORAGE"], ["standards", "src14", "SRC14_TARGET_STORAGE"], ] .into_iter() .map(|path_parts| CallPath::fullpath(&path_parts)); HashSet::from_iter(slot_keys_constants) } fn get_previous_slot_key(storage_field: &TyStorageField) -> [u8; 32] { let mut hasher = Sha256::new(); hasher.update(storage_field.full_name()); hasher.finalize().into() }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/references.rs
forc-plugins/forc-migrate/src/migrations/references.rs
#![allow(deprecated)] use std::vec; use crate::migrations::{visit_all_modules_mut, MutProgramInfo, Occurrence}; use anyhow::{Ok, Result}; use itertools::Itertools; use sway_ast::{ keywords::{AmpersandToken, Keyword, MutToken, Token}, Module, }; use sway_core::{language::ty::TyModule, Engines}; use sway_types::{Span, Spanned}; use super::{ContinueMigrationProcess, DryRun, MigrationStep, MigrationStepKind}; #[allow(dead_code)] pub(super) const REPLACE_REF_MUT_FN_PARAMETERS_STEP: MigrationStep = MigrationStep { title: "Replace `ref mut` function parameters with `&mut`", duration: 5, kind: MigrationStepKind::CodeModification( replace_ref_mut_fn_parameters_step, &[ "change function callers, by adding `&mut` to passed parameters.", "change function bodies, by dereferencing (`*`) parameters where needed.", ], ContinueMigrationProcess::IfNoManualMigrationActionsNeeded, ), help: &[ "Migration will replace `ref mut` function parameters with `&mut`.", "E.g., `ref mut x: u64` will become `x: &mut u64`.", ], }; // TODO: This is an incomplete implementation of the migration step. // It does not search for all possible occurrences of `ref mut`. // It is provided as an example of how complex migrations that // transform code can be written. The complete implementation // will be provided by the time the "references" experimental // feature get out of the experimental phase. // // Also, this migration step will be disabled for the next // breaking change version of Sway. It is currently enabled for // the sake of testing and trying out the `forc migrate` tool. // TODO: Simplify this migration by using matchers and modifiers. 
fn replace_ref_mut_fn_parameters_step( program_info: &mut MutProgramInfo, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { fn replace_ref_mut_fn_parameters_step_impl( _engines: &Engines, module: &mut Module, _ty_module: &TyModule, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { let mut result = vec![]; // TODO: Current implementation inspects only module functions. Extend it // to cover all functions (in traits, self-impls, trait-impls, etc.). for module_fn in module .items .iter_mut() .map(|annotated| &mut annotated.value) .filter_map(|decl| match decl { sway_ast::ItemKind::Fn(module_fn) => Some(module_fn), _ => None, }) { let fn_args = &mut module_fn.fn_signature.arguments.inner; let fn_args = match fn_args { sway_ast::FnArgs::Static(punctuated) => punctuated, sway_ast::FnArgs::NonStatic { .. } => unreachable!( "Module functions are always static and cannot have the `self` argument." ), }; let mut fn_args = fn_args.iter_mut().collect_vec(); if fn_args.is_empty() { continue; } for fn_arg in fn_args.iter_mut() { match &mut fn_arg.pattern { sway_ast::Pattern::Var { reference: ref_opt @ Some(_), mutable: mut_opt @ Some(_), name, } => { // Note that we cannot bind is `Some`s, because we would be mutually borrowing twice, // once in, e.g., `ref_opt` and once in `Some` for its part. // That's why, unfortunately, the `expect`. let result_span = Span::join( ref_opt .as_ref() .expect("`ref_opt` is `Some` in the match arm pattern") .span(), &name.span(), ); result.push(result_span.into()); // Replace `ref mut` with `&mut` if it is not a dry-run. if dry_run == DryRun::No { *ref_opt = None; *mut_opt = None; // We will insert the `&` and `mut` tokens right before the existing argument type. let insert_span = Span::empty_at_start(&fn_arg.ty.span()); // Modify the original type to the reference to it. 
fn_arg.ty = sway_ast::Ty::Ref { ampersand_token: AmpersandToken::new(insert_span.clone()), mut_token: Some(MutToken::new(insert_span)), ty: Box::new(fn_arg.ty.clone()), }; } // TODO: Find the usages of the function and add `&mut` to the passed parameters. // TODO: Dereference the parameters in the function body. } _ => continue, } } } Ok(result) } let res = visit_all_modules_mut( program_info, dry_run, replace_ref_mut_fn_parameters_step_impl, )?; Ok(res.into_iter().flatten().collect()) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/error_type.rs
forc-plugins/forc-migrate/src/migrations/error_type.rs
#![allow(deprecated)] use std::{sync::Arc, vec}; use crate::{ internal_error, migrations::{MutProgramInfo, Occurrence}, modifying::*, visiting::*, }; use anyhow::{bail, Ok, Result}; use sway_ast::{ assignable::ElementAccess, expr, Assignable, Expr, ItemFn, ItemStruct, StatementLet, }; use sway_core::language::{ ty::{ TyExpression, TyExpressionVariant, TyFunctionDecl, TyReassignmentTarget, TyStructDecl, TyVariableDecl, }, CallPathType, }; use sway_types::{Ident, Spanned}; use super::{ContinueMigrationProcess, DryRun, MigrationStep, MigrationStepKind}; // NOTE: We assume idiomatic usage of the identifier `panic`. This means we support // its migration only if it is used as a function name, struct field, or variable name. // E.g., renaming `panic` in `struct panic { ... }` is not supported, // as it is not an idiomatic usage. // NOTE: We don't have infrastructure in place for searching for usages of a symbol. // Ideally, if we had it, we would use such infrastructure to rename symbol usages // when its definition get renamed. // Luckily, for this particular migration, it is sufficient to visit specific expression, // like, e.g., function calls, and rename them. // NOTE: We don't support renaming modules named `panic`. The reason is that we have the `str` // module in the standard library, signaling that using keywords as module names is acceptable. 
#[allow(dead_code)] pub(super) const RENAME_EXISTING_PANIC_IDENTIFIERS_TO_R_PANIC_STEP: MigrationStep = MigrationStep { title: "Rename existing `panic` identifiers to `r#panic`", duration: 0, kind: MigrationStepKind::CodeModification( rename_existing_panic_identifiers_to_r_panic_step, &[], ContinueMigrationProcess::IfNoManualMigrationActionsNeeded, ), help: &[ "Migration will rename existing `panic` identifiers in struct fields,", "function names and arguments, and variable names to `r#panic`.", " ", "E.g., `let panic = 42;` will become `let r#panic = 42;`.", ], }; fn rename_existing_panic_identifiers_to_r_panic_step( program_info: &mut MutProgramInfo, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { struct Visitor; impl TreesVisitorMut<Occurrence> for Visitor { fn visit_fn_decl( &mut self, ctx: &VisitingContext, lexed_fn: &mut ItemFn, _ty_fn: Option<Arc<TyFunctionDecl>>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { // First, let's check the arguments. for lexed_arg in lexed_fn.fn_signature.arguments.inner.args_mut() { let arg_name = match &mut lexed_arg.pattern { sway_ast::Pattern::Var { name, .. } => name, // A valid identifier in a function argument pattern can only be a variable, // never an enum variant. So we know that this `ident` is a variable. sway_ast::Pattern::AmbiguousSingleIdent(ident) => ident, _ => continue, }; if arg_name.as_raw_ident_str() != "panic" { continue; } output.push(arg_name.span().into()); if ctx.dry_run == DryRun::Yes { continue; } *arg_name = Ident::new_with_raw(arg_name.span(), true); } // Then, the function name. 
if lexed_fn.fn_signature.name.as_raw_ident_str() != "panic" { return Ok(InvalidateTypedElement::No); } output.push(lexed_fn.fn_signature.name.span().into()); if ctx.dry_run == DryRun::Yes { return Ok(InvalidateTypedElement::No); } modify(lexed_fn).set_name("r#panic"); Ok(InvalidateTypedElement::No) } fn visit_struct_decl( &mut self, ctx: &VisitingContext, lexed_struct: &mut ItemStruct, _ty_struct: Option<Arc<TyStructDecl>>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { for lexed_field in lexed_struct.fields.inner.iter_mut() { let field_name = &mut lexed_field.value.name; if field_name.as_raw_ident_str() != "panic" { continue; } output.push(field_name.span().into()); if ctx.dry_run == DryRun::Yes { continue; } *field_name = Ident::new_with_raw(field_name.span(), true); } Ok(InvalidateTypedElement::No) } fn visit_fn_call( &mut self, ctx: &VisitingContext, lexed_fn_call: &mut Expr, ty_fn_call: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { // We report the occurrences only if it is not a dry-run. if ctx.dry_run == DryRun::Yes { return Ok(InvalidateTypedElement::No); } let Some(ty_fn_call) = ty_fn_call else { // Without the typed function call, we cannot proceed // because we cannot check if the function is actually defined in the current package. return Ok(InvalidateTypedElement::No); }; let Expr::FuncApp { func, args: _ } = lexed_fn_call else { bail!(internal_error("`lexed_fn_call` is not an `Expr::FuncApp`.")); }; let Expr::Path(path) = &mut **func else { // We are interested only in function calls that are paths. // Only such calls can be renamed. return Ok(InvalidateTypedElement::No); }; let last_segment = path.last_segment_mut(); if last_segment.name.as_raw_ident_str() != "panic" { return Ok(InvalidateTypedElement::No); } // Check if the function is actually defined in the current package. let TyExpressionVariant::FunctionApplication { fn_ref, .. 
} = &ty_fn_call.expression else { bail!(internal_error( "`ty_fn_call` is not a `TyExpressionVariant::FunctionApplication`." )); }; let ty_fn = ctx.engines.de().get_function(fn_ref.id()); // We need the full path to the function to ensure it is defined in the current package. if ty_fn.call_path.callpath_type != CallPathType::Full { return Ok(InvalidateTypedElement::No); } let Some(fn_pkg_name) = ty_fn.call_path.prefixes.first() else { return Ok(InvalidateTypedElement::No); }; if fn_pkg_name.as_str() != ctx.pkg_name { return Ok(InvalidateTypedElement::No); } output.push(last_segment.span().into()); modify(last_segment).set_name("r#panic"); Ok(InvalidateTypedElement::No) } fn visit_method_call( &mut self, ctx: &VisitingContext, lexed_method_call: &mut Expr, ty_method_call: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { // We report the occurrences only if it is not a dry-run. if ctx.dry_run == DryRun::Yes { return Ok(InvalidateTypedElement::No); } let lexed_method_call_info = LexedMethodCallInfoMut::new(lexed_method_call)?; let ty_method_call_info = ty_method_call .map(|ty_method_call| TyMethodCallInfo::new(ctx.engines.de(), ty_method_call)) .transpose()?; let Some(ty_method_call_info) = ty_method_call_info else { // Without the typed method call, we cannot proceed // because we cannot check if the method is actually defined in the current package. return Ok(InvalidateTypedElement::No); }; if lexed_method_call_info.path_seg.name.as_raw_ident_str() != "panic" { return Ok(InvalidateTypedElement::No); } let ty_method = ty_method_call_info.fn_decl; // We need the full path to the function to ensure it is defined in the current package. 
if ty_method.call_path.callpath_type != CallPathType::Full { return Ok(InvalidateTypedElement::No); } let Some(fn_pkg_name) = ty_method.call_path.prefixes.first() else { return Ok(InvalidateTypedElement::No); }; if fn_pkg_name.as_str() != ctx.pkg_name { return Ok(InvalidateTypedElement::No); } output.push(lexed_method_call_info.path_seg.span().into()); modify(lexed_method_call_info.path_seg).set_name("r#panic"); Ok(InvalidateTypedElement::No) } fn visit_statement_let( &mut self, ctx: &VisitingContext, lexed_let: &mut StatementLet, _ty_var_decl: Option<&TyVariableDecl>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let var_name = match &mut lexed_let.pattern { sway_ast::Pattern::Var { name, .. } => name, // A valid identifier in a variable name pattern can only be a variable, // never an enum variant. So we know that this `ident` is a variable. sway_ast::Pattern::AmbiguousSingleIdent(ident) => ident, _ => { // NOTE: We don't support renaming `panic` in patterns other than variables, // e.g., in deconstruction patterns. return Ok(InvalidateTypedElement::No); } }; if var_name.as_raw_ident_str() != "panic" { return Ok(InvalidateTypedElement::No); } output.push(var_name.span().into()); if ctx.dry_run == DryRun::Yes { return Ok(InvalidateTypedElement::No); } *var_name = Ident::new_with_raw(var_name.span(), true); Ok(InvalidateTypedElement::No) } fn visit_expr( &mut self, ctx: &VisitingContext, lexed_expr: &mut Expr, _ty_expr: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { // We report the occurrences only if it is not a dry-run. if ctx.dry_run == DryRun::Yes { return Ok(InvalidateTypedElement::No); } let var_names = match lexed_expr { Expr::Path(path) if path.suffix.is_empty() => vec![&mut path.prefix.name], Expr::Struct { fields, .. } => fields .inner .iter_mut() .map(|field| &mut field.field_name) .collect(), Expr::FieldProjection { name, .. 
} => vec![name], _ => vec![], }; for var_name in var_names .into_iter() .filter(|n| n.as_raw_ident_str() == "panic") { output.push(var_name.span().into()); *var_name = Ident::new_with_raw(var_name.span(), true); } Ok(InvalidateTypedElement::No) } fn visit_reassignment( &mut self, ctx: &VisitingContext, _lexed_op: &mut expr::ReassignmentOp, lexed_lhs: &mut Assignable, _ty_lhs: Option<&TyReassignmentTarget>, _lexed_rhs: &mut Expr, _ty_rhs: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { // On the LHS, we support renaming `panic` only in these cases: // - Variable names, e.g., `let panic = 42;` // - Single field access, e.g., `let x.panic = 42;` // But occurrences in, e.g., `foo[panic].x = 42;` will not be renamed. // Full traversal of reassignments' LHS will be done as a part of migration // infrastructure in the future. // We report the occurrences only if it is not a dry-run. if ctx.dry_run == DryRun::Yes { return Ok(InvalidateTypedElement::No); } let var_names = match lexed_lhs { Assignable::ElementAccess(element_access) => match element_access { ElementAccess::Var(name) => vec![name], ElementAccess::FieldProjection { target: element_access, name, .. } => { let mut names = vec![name]; if let ElementAccess::Var(name) = &mut **element_access { names.push(name) }; names } ElementAccess::TupleFieldProjection { target: element_access, .. } | ElementAccess::Index { target: element_access, .. } => match &mut **element_access { ElementAccess::Var(name) => vec![name], _ => vec![], }, _ => vec![], }, Assignable::Deref { .. } => vec![], }; for var_name in var_names .into_iter() .filter(|n| n.as_raw_ident_str() == "panic") { output.push(var_name.span().into()); *var_name = Ident::new_with_raw(var_name.span(), true); } Ok(InvalidateTypedElement::No) } } ProgramVisitorMut::visit_program(program_info, dry_run, &mut Visitor {}) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/new_hashing.rs
forc-plugins/forc-migrate/src/migrations/new_hashing.rs
#![allow(dead_code)] use std::collections::HashSet; use super::{MigrationStep, MigrationStepKind}; use crate::{ migrations::{DryRun, Occurrence, ProgramInfo}, visiting::{ InvalidateTypedElement, LexedFnCallInfo, ProgramVisitor, TreesVisitor, TyFnCallInfo, VisitingContext, }, }; use anyhow::{Ok, Result}; use sway_ast::{Expr, StorageField, Ty}; use sway_core::{ language::{ ty::{TyExpression, TyStorageField, TyStructDecl}, CallPath, }, Engines, TypeId, TypeInfo, TypeParameter, }; use sway_error::formatting::{plural_s, sequence_to_str, Enclosing}; use sway_types::Spanned; pub(super) const REVIEW_EXISTING_USAGES_OF_STORAGE_MAP_SHA256_AND_KECCAK256: MigrationStep = MigrationStep { title: "Review existing usages of `StorageMap`, `sha256`, and `keccak256`", duration: 10, kind: MigrationStepKind::Instruction(review_existing_usages_of_storage_map_sha256_and_keccak256), help: &[ "New hashing changes the hashes of instances of the following types:", " - string slices (`str`)", " - string arrays (`str[N]`)", " - arrays (`[T; N]`)", " - raw slices (`raw_slice`)", " - vectors (`std::vec::Vec<T>`)", " - bytes (`std::bytes::Bytes`)", " ", "To decide if opting-in to new hashing is backward-compatible and safe or not,", "review if those types are directly used, or are contained in types:", " - used as keys in `StorageMap`s,", " - used in custom storage types,", " - hashed using `sha256` or `keccak256` functions.", " ", "╔═════════════════════════════════════════════════════════════════════════════════════╗", "║ The above occurrences must not be seen as comprehensive, but rather as a guideline. ║", "║ Carefully review all the storage access and hashing patterns in your code. ║", "║ E.g., using precomputed hashes, having custom `Hash` implementations, and similar. ║", "╚═════════════════════════════════════════════════════════════════════════════════════╝", ], }; // NOTE: When analyzing storage fields, we expect that the storage types are never nested // inside of non-storage types. 
// E.g., we don't expect to have a storage fields like these: // field_a: (u8, u8, StorageMap<...>) = (1, 2, StorageMap {}), // field_b: SomeNonStorageTypeStruct<StorageMap<...>> = SomeNonStorageTypeStruct { field: StorageMap {} }, fn review_existing_usages_of_storage_map_sha256_and_keccak256( program_info: &ProgramInfo, ) -> Result<Vec<Occurrence>> { struct Visitor { storage_map_path: CallPath, storage_vec_path: CallPath, non_affected_storage_types_paths: HashSet<CallPath>, hash_functions_paths: HashSet<CallPath>, hash_functions_names: HashSet<&'static str>, affected_std_structs: HashSet<CallPath>, non_affected_std_structs: HashSet<CallPath>, built_in_type_names: HashSet<&'static str>, } impl Visitor { fn new() -> Self { Self { storage_map_path: CallPath::fullpath(&[ "std", "storage", "storage_map", "StorageMap", ]), storage_vec_path: CallPath::fullpath(&[ "std", "storage", "storage_vec", "StorageVec", ]), non_affected_storage_types_paths: HashSet::from_iter( vec![ ["std", "storage", "storage_bytes", "StorageBytes"], ["std", "storage", "storage_string", "StorageString"], ] .into_iter() .map(|path_parts| CallPath::fullpath(&path_parts)), ), hash_functions_paths: HashSet::from_iter( vec![["std", "hash", "sha256"], ["std", "hash", "keccak256"]] .into_iter() .map(|path_parts| CallPath::fullpath(&path_parts)), ), hash_functions_names: HashSet::from_iter(vec!["sha256", "keccak256"]), affected_std_structs: HashSet::from_iter( vec![["std", "vec", "Vec"], ["std", "bytes", "Bytes"]] .into_iter() .map(|path_parts| CallPath::fullpath(&path_parts)), ), non_affected_std_structs: HashSet::from_iter( vec![ ["std", "crypto", "secp256k1", "Secp256k1"], ["std", "crypto", "secp256r1", "Secp256r1"], ["std", "crypto", "message", "Message"], ["std", "crypto", "public_key", "PublicKey"], ] .into_iter() .map(|path_parts| CallPath::fullpath(&path_parts)) .chain(vec![CallPath::fullpath(&["std", "b512", "B512"])]), ), built_in_type_names: HashSet::from_iter(vec![ "()", "!", "bool", "u8", 
"u16", "u32", "u64", "u256", "b256", ]), } } fn is_known_storage_type(&self, call_path: &CallPath) -> bool { self.non_affected_storage_types_paths.contains(call_path) || self.storage_map_path == *call_path || self.storage_vec_path == *call_path } /// Returns the (affected type name, help message) if the type given by `type_id` is affected by new hashing. /// The affected type name is the name of the type that is actually affected by new hashing. /// It doesn't have to be the same as the type name given by `type_id`. /// E.g., if `type_id` represents a `SomeStruct<str>`, the affected type name will be `str`. fn is_affected_type(&self, engines: &Engines, type_id: TypeId) -> Option<(String, String)> { fn review_type() -> Option<(String, String)> { Some(( "{unknown}".into(), "Review the type of this expression.".into(), )) } fn review_generic_type(type_name: &str) -> Option<(String, String)> { Some((type_name.into(), format!("This has generic type \"{type_name}\". Review all the concrete types used with it."))) } fn review_affected_type( original_type_name: &str, type_name: &str, depth: usize, ) -> Option<(String, String)> { Some((type_name.into(), match depth { 0 => format!("This has type \"{original_type_name}\"."), _ => format!("This has type \"{original_type_name}\", that {}contains \"{type_name}\".", if depth > 1 { "recursively " } else { "" } ), })) } fn is_affected_type_impl( visitor: &Visitor, engines: &Engines, original_type_name: &str, type_id: TypeId, depth: usize, ) -> Option<(String, String)> { match &*engines.te().get(type_id) { // Types not affected by new hashing. TypeInfo::Never | TypeInfo::UnsignedInteger(_) | TypeInfo::ContractCaller { .. } | TypeInfo::Boolean | TypeInfo::B256 | TypeInfo::Numeric | TypeInfo::Contract | TypeInfo::RawUntypedPtr => None, // Generic types. TypeInfo::UnknownGeneric { .. } => review_generic_type(&engines.help_out(type_id).to_string()), // Types that will not occur in a typed program compiled without any errors. 
// Types like `Unknown` or `ErrorRecovery` will never appear in a // typed program compiled without any errors. Still, we handle // all of them here with the `review_the_type` message, to be on the safe side. TypeInfo::Unknown | TypeInfo::Placeholder(_) | TypeInfo::TypeParam(_) | TypeInfo::UntypedEnum(_) | TypeInfo::UntypedStruct(_) | TypeInfo::Custom { .. } | TypeInfo::ErrorRecovery(_) => review_type(), // Types that are directly affected by new hashing. TypeInfo::StringSlice | TypeInfo::StringArray(_) | TypeInfo::Array(_, _) | TypeInfo::RawUntypedSlice => review_affected_type(original_type_name, &engines.help_out(type_id).to_string(), depth), // Aggregate types that might be directly or indirectly affected by new hashing. TypeInfo::Enum(decl_id) => { let enum_decl = engines.de().get_enum(decl_id); for variant in enum_decl.variants.iter() { if let Some(is_affected) = is_affected_type_impl(visitor, engines, original_type_name, variant.type_argument.type_id, depth + 1) { return Some(is_affected); } } None }, TypeInfo::Struct(decl_id) => { let struct_decl = engines.de().get_struct(decl_id); if visitor.non_affected_std_structs.contains(&struct_decl.call_path) { None } else if visitor.affected_std_structs.contains(&struct_decl.call_path) { review_affected_type(original_type_name, &engines.help_out(type_id).to_string(), depth) } else { for field in struct_decl.fields.iter() { if let Some(is_affected) = is_affected_type_impl(visitor, engines, original_type_name, field.type_argument.type_id, depth + 1) { return Some(is_affected); } } None } }, TypeInfo::Tuple(generic_arguments) => { for generic_argument in generic_arguments.iter() { if let Some(is_affected) = is_affected_type_impl(visitor, engines, original_type_name, generic_argument.type_id, depth + 1) { return Some(is_affected); } } None }, // Types with generic arguments that might be indirectly affected by new hashing. 
TypeInfo::Ptr(generic_argument) // Typed slices are still not a fully implemented and official feature. // We don't have a `Hash` implementation for them yet, so they are not affected by new hashing. // Still, we will handle the type itself, to be on the safe side. | TypeInfo::Slice(generic_argument) | TypeInfo::Alias { ty: generic_argument, .. } | TypeInfo::Ref { referenced_type: generic_argument, .. } => is_affected_type_impl(visitor, engines, original_type_name, generic_argument.type_id, depth + 1), // Trait type. TypeInfo::TraitType { implemented_in, .. } => is_affected_type_impl(visitor, engines, original_type_name, *implemented_in, depth + 1), } } let original_type_name = engines.help_out(type_id).to_string(); is_affected_type_impl(self, engines, &original_type_name, type_id, 0) } /// Returns a help message if the storage field type `type_id` might be affected by new hashing, or `None` if it is not. fn is_affected_storage_field_type( &self, engines: &Engines, type_id: TypeId, ) -> Option<String> { /// Describes why a storage field is affected by new hashing. #[derive(Default)] struct AffectedStorageField { /// Types of keys of a one or more nested `StorageMap`s that are affected by new hashing. /// E.g., `["str[3]", "[u64; 3]"`. /// The types are ordered left to right, in order of appearance in the storage field type declaration. affected_storage_map_keys: Vec<String>, /// Types that appear in the storage field type declaration and that could be unknown storage types. potential_storage_types: Vec<String>, // Represents situations that should never happen in a typed program compiled without any errors. // E.g., `StorageMap` must have exactly two type parameters. If not, this is an unexpected error. // We handle such errors with a message to review the storage field, to be on the safe side. unexpected_error: bool, } impl AffectedStorageField { /// Returns a help message if the storage field is affected by new hashing, or `None` if it is not. 
fn help_message(&self) -> Option<String> { if self.affected_storage_map_keys.is_empty() && self.potential_storage_types.is_empty() && !self.unexpected_error { return None; } let message = if self.unexpected_error { "Review this storage field.".into() } else { format!( "Review this storage field, because of {}{}{}.", if self.affected_storage_map_keys.is_empty() { "".to_string() } else { format!( "{} in \"StorageMap\" key{}", sequence_to_str( &self.affected_storage_map_keys, Enclosing::DoubleQuote, usize::MAX ), plural_s(self.affected_storage_map_keys.len()), ) }, if !(self.potential_storage_types.is_empty() || self.affected_storage_map_keys.is_empty()) { " and " } else { "" }, if self.potential_storage_types.is_empty() { "".to_string() } else { format!( "potential custom storage type{} {}", plural_s(self.potential_storage_types.len()), sequence_to_str( &self.potential_storage_types, Enclosing::DoubleQuote, usize::MAX ), ) }, ) }; Some(message) } } fn is_affected_storage_field_type_impl( visitor: &Visitor, engines: &Engines, type_id: TypeId, affected_storage_field: &mut AffectedStorageField, ) { fn get_generic_parameter_type_id(type_parameter: &TypeParameter) -> Option<TypeId> { match type_parameter { TypeParameter::Type(ty) => Some(ty.type_id), TypeParameter::Const(_) => None, } } // We assume that: // - only structs can be storage types, // - only storage types can contain other storage types. // For each category of storage types, we have a visitor function // named `try_visit_***`, that returns `true` if the type is a storage type of that category // and was visited. 
fn try_visit_non_affected_known_storage_type( visitor: &Visitor, struct_decl: &TyStructDecl, ) -> bool { visitor .non_affected_storage_types_paths .contains(&struct_decl.call_path) } fn try_visit_storage_vec( visitor: &Visitor, engines: &Engines, struct_decl: &TyStructDecl, affected_storage_field: &mut AffectedStorageField, ) -> bool { if visitor.storage_vec_path != struct_decl.call_path { return false; } if struct_decl.generic_parameters.len() != 1 { affected_storage_field.unexpected_error = true; return true; } let element_type_id = get_generic_parameter_type_id(&struct_decl.generic_parameters[0]); if element_type_id.is_none() { affected_storage_field.unexpected_error = true; return true; } let element_type_id = element_type_id.unwrap(); is_affected_storage_field_type_impl( visitor, engines, element_type_id, affected_storage_field, ); true } fn try_visit_unknown_potential_storage_type( visitor: &Visitor, engines: &Engines, struct_decl: &TyStructDecl, struct_name: &str, affected_storage_field: &mut AffectedStorageField, ) -> bool { if visitor.is_known_storage_type(&struct_decl.call_path) { return false; } // Storage types are empty structs. 
if !struct_decl.fields.is_empty() { return false; } affected_storage_field .potential_storage_types .push(struct_name.to_string()); for generic_parameter in struct_decl.generic_parameters.iter() { if let Some(type_id) = get_generic_parameter_type_id(generic_parameter) { is_affected_storage_field_type_impl( visitor, engines, type_id, affected_storage_field, ); } } true } fn try_visit_storage_map( visitor: &Visitor, engines: &Engines, struct_decl: &TyStructDecl, affected_storage_field: &mut AffectedStorageField, ) -> bool { if visitor.storage_map_path != struct_decl.call_path { return false; } if struct_decl.generic_parameters.len() != 2 { affected_storage_field.unexpected_error = true; return true; } let key_type_id = get_generic_parameter_type_id(&struct_decl.generic_parameters[0]); let value_type_id = get_generic_parameter_type_id(&struct_decl.generic_parameters[1]); if key_type_id.is_none() || value_type_id.is_none() { affected_storage_field.unexpected_error = true; return true; } let key_type_id = key_type_id.unwrap(); let value_type_id = value_type_id.unwrap(); // `StorageMap` itself does not implement `Hash`, so it cannot be a key. // So, for the key, we just check if it is affected by new hashing. if let Some((type_name, _msg)) = visitor.is_affected_type(engines, key_type_id) { affected_storage_field .affected_storage_map_keys .push(type_name); } // For the value, we must check if it is a nested `StorageMap`, or a nested storage type. 
is_affected_storage_field_type_impl( visitor, engines, value_type_id, affected_storage_field, ); true } if let TypeInfo::Struct(struct_decl) = &*engines.te().get_unaliased(type_id) { let struct_decl = engines.de().get_struct(struct_decl); let _ = try_visit_non_affected_known_storage_type(visitor, &struct_decl) || try_visit_storage_vec( visitor, engines, &struct_decl, affected_storage_field, ) || try_visit_storage_map( visitor, engines, &struct_decl, affected_storage_field, ) || try_visit_unknown_potential_storage_type( visitor, engines, &struct_decl, &engines.help_out(type_id).to_string(), affected_storage_field, ); // Otherwise, we have a regular struct that is not a storage type. } } let mut affected_storage_field = AffectedStorageField::default(); is_affected_storage_field_type_impl( self, engines, type_id, &mut affected_storage_field, ); affected_storage_field.help_message() } } impl TreesVisitor<Occurrence> for Visitor { fn visit_fn_call( &mut self, ctx: &VisitingContext, lexed_fn_call: &Expr, ty_fn_call: Option<&TyExpression>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { let ty_fn_call_info = ty_fn_call .map(|ty_fn_call| TyFnCallInfo::new(ctx.engines.de(), ty_fn_call)) .transpose()?; // If we have the typed call info we can check via function decl if // it is one of the hash functions, even if an alias is used. if let Some(ty_fn_call_info) = ty_fn_call_info { if !self .hash_functions_paths .contains(&ty_fn_call_info.fn_decl.call_path) { return Ok(InvalidateTypedElement::No); } let Some((_arg_name, arg_value)) = ty_fn_call_info.arguments.first() else { // This should never happen. There must be exactly one argument to hash functions. // But if it happens, we mark the whole call for review. 
output.push(Occurrence::new( lexed_fn_call.span(), format!( "Review this \"{}\" call.", ty_fn_call_info.fn_decl.call_path.suffix ), )); return Ok(InvalidateTypedElement::No); }; let Some((_type_name, help_message)) = self.is_affected_type(ctx.engines, arg_value.return_type) else { return Ok(InvalidateTypedElement::No); }; // We have found a call to a hash function with an affected type. output.push(Occurrence::new(arg_value.span.clone(), help_message)); } else { // If we don't have the typed call info, we can only check the called function name. // If it is one of the hash functions, we mark the call for review. let lexed_fn_call_info = LexedFnCallInfo::new(lexed_fn_call)?; let Expr::Path(path) = lexed_fn_call_info.func else { return Ok(InvalidateTypedElement::No); }; let last_segment = path.last_segment(); if !self .hash_functions_names .contains(&last_segment.name.as_str()) { return Ok(InvalidateTypedElement::No); } output.push(Occurrence::new( lexed_fn_call.span(), format!("Review this \"{}\" call.", last_segment.name.as_str()), )); } Ok(InvalidateTypedElement::No) } fn visit_storage_field_decl( &mut self, ctx: &VisitingContext, lexed_storage_field: &StorageField, ty_storage_field: Option<&TyStorageField>, output: &mut Vec<Occurrence>, ) -> Result<InvalidateTypedElement> { if let Some(ty_field_type) = ty_storage_field.map(|x| x.type_argument.type_id) { let Some(help_message) = self.is_affected_storage_field_type(ctx.engines, ty_field_type) else { return Ok(InvalidateTypedElement::No); }; // We have found an affected storage field. output.push(Occurrence::new( lexed_storage_field.name.span(), help_message, )); } else { match &lexed_storage_field.ty { // We don't expect non-storage types to contain storage types. // Thus, we can ignore tuples and arrays here. Ty::Tuple(_) | Ty::Array(_) // These types cannot contain storage types, or are even not supported // in storage declarations, so we can ignore them as well. | Ty::StringSlice(_) | Ty::StringArray { .. 
} | Ty::Slice { .. } => {}, // These types cannot appear in a program compiled without any errors. // Still, to be on the safe side, we mark them for review. Ty::Infer { .. } | Ty::Ptr { .. } | Ty::Ref { .. } | Ty::Never { .. } | Ty::Expr(_) => { output.push(Occurrence::new(lexed_storage_field.name.span(), "Review this storage field.".to_string())); }, // Without the typed storage field, we have to be pessimistic and assume that // the storage field type might be affected by new hashing. // To avoid obvious false positives, we check if the storage field type is a built-in type. Ty::Path(path_type) => { // If it is not a built-in type. if !(path_type.root_opt.is_none() && path_type.suffix.is_empty() && path_type.prefix.generics_opt.is_none() && self.built_in_type_names.contains(&path_type.prefix.name.as_str())) { output.push(Occurrence::new(lexed_storage_field.name.span(), "Review this storage field.".to_string())); } }, } } Ok(InvalidateTypedElement::No) } } ProgramVisitor::visit_program(program_info, DryRun::Yes, &mut Visitor::new()) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/mod.rs
forc-plugins/forc-migrate/src/migrations/mod.rs
//! This module contains common API for defining and implementing individual //! [MigrationStep]s. //! //! Migration steps are defined in the submodules. Every submodule has the name //! of the corresponding breaking change Sway feature and contains all the //! migration steps needed to migrate to that feature. //! //! The special [demo] submodule contains demo migrations used for learning and testing //! the migration tool. mod demo; mod error_type; mod merge_core_std; mod new_hashing; mod partial_eq; mod references; mod storage_domains; mod try_from_bytes_for_b256; use std::{collections::HashSet, sync::Arc}; use anyhow::{bail, Result}; use duplicate::duplicate_item; use itertools::Itertools; use sway_ast::Module; use sway_core::{ language::{ lexed::{LexedModule, LexedProgram}, ty::{TyModule, TyProgram}, }, Engines, }; use sway_features::Feature; use sway_types::Span; use crate::internal_error; pub(crate) struct ProgramInfo<'a> { /// The name of the current package being migrated. pub pkg_name: String, pub lexed_program: Arc<LexedProgram>, pub ty_program: Arc<TyProgram>, pub engines: &'a Engines, } /// Wrapper over [ProgramInfo] that provides write access /// to the [LexedProgram], but only read access to the /// [TyProgram] and the [Engines]. It is used in migrations /// that modify the source code by altering the lexed program. pub(crate) struct MutProgramInfo<'a> { /// The name of the current package being migrated. pub pkg_name: &'a str, pub lexed_program: &'a mut LexedProgram, pub ty_program: &'a TyProgram, pub engines: &'a Engines, } impl ProgramInfo<'_> { pub(crate) fn as_mut(&mut self) -> MutProgramInfo { MutProgramInfo { pkg_name: &self.pkg_name, // Because the `ProgramsCacheEntry` clones the `programs`, the compilation will always // result in two strong `Arc` references to the `lexed_program`. // Therefore, we must use `Arc::make_mut` to get the copy-on-write behavior. 
lexed_program: Arc::make_mut(&mut self.lexed_program), ty_program: &self.ty_program, engines: self.engines, } } } /// A single migration step in the overall migration process. pub(crate) struct MigrationStep { /// Migration step unique title. /// /// Formulated as a continuation of a suggestion to a developer: You should \<title\>. /// /// Titles are short, start with a capital letter and do not end in punctuation. /// /// E.g.: Replace `ref mut` function parameters with `&mut` /// /// In particular, titles of the manual migration steps start with "Review". pub title: &'static str, /// An estimated time (in minutes) needed for the manual part of migrating /// a single typical occurrence of the change represented by this step. /// /// The estimate includes **all** the manual effort. /// /// E.g., to replace a single `ref mut` function parameter with `&mut`, the migration /// will change the function signature. The manual part of the effort will be changing /// the callers and eventually adding dereferencing in the function body. /// /// Fully automated migration steps, and only them, can have `duration` set to zero. pub duration: usize, pub kind: MigrationStepKind, /// A short help for the migration step. /// /// If the `kind` is a [MigrationStepKind::CodeModification], start the help /// with "Migration will", to point out that the migration is a (semi)automatic one /// and causes changes in the source file. /// /// E.g.: Migration will replace `ref mut` function parameters with `&mut`. /// /// It is advisable to provide the short help, but it is not mandatory. /// Every migration step will have an automatic help line that points to /// the detailed migration guide provided in the feature tracking issue. 
pub help: &'static [&'static str], } #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)] pub(crate) enum MigrationStepExecution { Manual, Semiautomatic, Automatic, } impl MigrationStep { pub(crate) fn execution(&self) -> MigrationStepExecution { use MigrationStepExecution::*; match self.kind { MigrationStepKind::Instruction(_) => Manual, MigrationStepKind::CodeModification(_, manual_migration_actions, _) if !manual_migration_actions.is_empty() => { Semiautomatic } MigrationStepKind::CodeModification(..) => Automatic, MigrationStepKind::Interaction(..) => Semiautomatic, } } pub(crate) fn has_manual_actions(&self) -> bool { match self.kind { MigrationStepKind::Instruction(_) => true, MigrationStepKind::CodeModification(_, [], _) => false, MigrationStepKind::CodeModification(_, _, _) => true, MigrationStepKind::Interaction(_, _, [], _) => false, MigrationStepKind::Interaction(_, _, _, _) => true, } } } /// Denotes that a migration step that changes the source code should /// be executed in a dry-run mode, means just returning the places in code /// to be changed, but without performing the actual change. #[derive(Clone, Copy, PartialEq, Eq)] pub(crate) enum DryRun { Yes, No, } /// Developer's response during an interactive migration step. #[derive(Clone, Copy, PartialEq, Eq)] pub(crate) enum InteractionResponse { /// There was no interaction with the developer. None, /// Developer opted for executing the migration step and change the code. ExecuteStep, /// Developer communicated that the code change is not needed. StepNotNeeded, /// Developer opted for postponing the migration step. PostponeStep, } /// A single occurrence of a [MigrationStep] report. pub(crate) struct Occurrence { /// The [Span] of the occurrence in the original source code. pub span: Span, /// An optional help message that provides additional /// information about the occurrence. /// /// For most of migration steps, this will be `None`. 
/// Use it only if it brings valuable additional information /// about the particular [Occurrence]. pub msg: Option<String>, } impl Occurrence { pub fn new(span: Span, msg: String) -> Self { Occurrence { span, msg: Some(msg), } } pub fn msg_or_empty(&self) -> String { self.msg.clone().unwrap_or_default() } } impl From<Span> for Occurrence { fn from(span: Span) -> Self { Occurrence { span, msg: None } } } /// A function that analyses a program given by the [ProgramInfo] and returns /// the [Occurrence]s of all the places in the program code that need to be addressed /// during a manual migration step. /// /// The function does not modify the original program, and can use either the /// [ProgramInfo::lexed_program] or the [ProgramInfo::ty_program], or both, /// to perform the analysis. type InstructionFn = for<'a> fn(&'a ProgramInfo<'a>) -> Result<Vec<Occurrence>>; /// A function that analyses a program given by the [MutProgramInfo] and returns /// the [Occurrence]s of all the places in the **original** program code that will be changed /// during an automatic or semiautomatic migration step. /// /// The function modifies the [LexedProgram] to perform the required code change, /// unless the [DryRun] parameter is set to [DryRun::Yes]. type CodeModificationFn = for<'a> fn(&'a mut MutProgramInfo<'a>, DryRun) -> Result<Vec<Occurrence>>; /// A function that interacts with the developer, eventually modifying the original /// program given by [MutProgramInfo]. The developer's input decides if the modification /// will happen or not. /// /// Returns the [Occurrence]s of all the places in the **original** program code that are /// changed during the interaction, if any, together with the developer's [InteractionResponse]. 
type InteractionFn = for<'a> fn(&'a mut MutProgramInfo<'a>) -> Result<(InteractionResponse, Vec<Occurrence>)>; /// A function that visits the [Module] and its corresponding [TyModule], /// potentially alters the lexed module, and returns a /// [Result] containing related information about the visited module. /// /// For its usages, see [visit_modules_mut]. type ModuleVisitorMutFn<T> = for<'a> fn(&'a Engines, &'a mut Module, &'a TyModule, DryRun) -> Result<T>; /// A function that visits the [Module] and its corresponding [TyModule], /// and returns a [Result] containing related information about the visited module. /// /// For its usages, see [visit_modules]. type ModuleVisitorFn<T> = for<'a> fn(&'a Engines, &'a Module, &'a TyModule, DryRun) -> Result<T>; /// Defines if the migration process can continue after a code modification /// migration step. #[derive(PartialEq, Eq, Clone, Copy)] pub(crate) enum ContinueMigrationProcess { /// Continue if the step has no manual migration actions specified. /// This is the default and most common option. IfNoManualMigrationActionsNeeded, /// Always stop the migration. This is usually needed only after the /// steps that represent intermediate migration to an experimental /// feature for the purpose of early adoption. /// /// E.g., such step will keep the original code marked with /// experimental feature set to false, and insert the new implementation /// marked with experimental feature set to true. /// /// Continuing migration after such a step would be confusing, /// because the next step would usually offer immediate removal of the /// changes done in the step. Never, } pub(crate) enum MigrationStepKind { /// A migration step that provides instructions to developers, /// and explains a manual action they should take. Instruction(InstructionFn), /// A migration step that automatically modifies the original source code, /// and eventually gives additional instructions to developers, /// for manual post-migration actions. 
/// /// The [CodeModificationFn] modifies and overwrites the original source code. /// The second parameter are the _manual migration actions_. /// Those actions need to be done by developers after the automatic part /// of the migration is executed. /// /// Manual migration actions start with a small letter and end with a dot. /// /// E.g.: change function callers, by adding `&mut` to passed parameters. /// /// **If a [MigrationStepKind::CodeModification] does not have /// _manual migration actions_ it is considered to be a fully automated migration, /// after witch the migration process can safely continue, unless marked as /// [ContinueMigrationProcess::Never].** CodeModification( CodeModificationFn, &'static [&'static str], ContinueMigrationProcess, ), /// A migration step that first provides instructions to developers, /// and afterwards interacts with them, giving additional instructions /// and asking for additional input. /// /// Based on the input gotten during the interaction, the [InteractionFn] /// can modify the original source code. /// /// The second parameter are the _manual migration actions_. /// Those actions still need to be done by developers after the automatic part /// of the migration is executed during the interaction. /// /// Manual migration actions start with a small letter and end with a dot. /// /// E.g.: change function callers, by adding `&mut` to passed parameters. /// /// **If a [MigrationStepKind::Interaction] does not have /// _manual migration actions_ it is considered to be finished after the interaction, /// after witch the migration process can safely continue, unless marked as /// [ContinueMigrationProcess::Never].** /// /// Note that in a general case, the [InstructionFn] and the [InteractionFn] /// can return different [Span]s. 
E.g., during the instruction a single /// span can be returned pointing to a module in which the change needs /// to be done, while the interaction will return the actual places in the /// module that were modified. Interaction( InstructionFn, InteractionFn, &'static [&'static str], ContinueMigrationProcess, ), } /// A convenient method for visiting all the modules within a program. /// The `visitor` will be called for every module, and the method will return the /// [Vec] containing the results of all the individual visitor calls. #[deprecated(note = "use `crate::visiting::ProgramVisitor/Mut::visit_program()` instead")] #[allow(deprecated)] pub(crate) fn visit_all_modules<T>( program_info: &ProgramInfo, dry_run: DryRun, visitor: ModuleVisitorFn<T>, ) -> Result<Vec<T>> { visit_modules( program_info.engines, &program_info.lexed_program.root, &program_info.ty_program.root_module, dry_run, visitor, ) } /// A convenient method for visiting all the modules within a program. /// The `visitor` will be called for every module, and the method will return the /// [Vec] containing the results of all the individual visitor calls. /// /// Visitors can mutate the [LexedProgram]. #[deprecated(note = "use `crate::visiting::ProgramVisitor/Mut::visit_program()` instead")] #[allow(deprecated)] pub(crate) fn visit_all_modules_mut<T>( program_info: &mut MutProgramInfo, dry_run: DryRun, visitor: ModuleVisitorMutFn<T>, ) -> Result<Vec<T>> { visit_modules_mut( program_info.engines, &mut program_info.lexed_program.root, &program_info.ty_program.root_module, dry_run, visitor, ) } /// A convenient method for visiting the `lexed_module` and its corresponding `ty_module`, /// and all their submodules, recursively. /// The `visitor` will be called for every module, and the method will return the /// [Vec] containing the results of all the individual visitor calls. 
#[duplicate_item( __visit_modules __ModuleVisitorFn __ref_type(type) __ref(value) __iter; [visit_modules] [ModuleVisitorFn] [&type] [&value] [iter]; [visit_modules_mut] [ModuleVisitorMutFn] [&mut type] [&mut value] [iter_mut]; )] #[deprecated(note = "use `crate::visiting::ProgramVisitor/Mut::visit_program()` instead")] #[allow(deprecated)] pub(crate) fn __visit_modules<T>( engines: &Engines, lexed_module: __ref_type([LexedModule]), ty_module: &TyModule, dry_run: DryRun, visitor: __ModuleVisitorFn<T>, ) -> Result<Vec<T>> { fn visit_modules_rec<T>( engines: &Engines, lexed_module: __ref_type([LexedModule]), ty_module: &TyModule, dry_run: DryRun, visitor: __ModuleVisitorFn<T>, result: &mut Vec<T>, ) -> Result<()> { let visitor_result = visitor( engines, __ref([lexed_module.tree.value]), ty_module, dry_run, )?; result.push(visitor_result); let mut lexed_submodules = lexed_module.submodules.__iter().collect_vec(); let mut ty_submodules = ty_module.submodules.iter().collect_vec(); if lexed_submodules.len() != ty_submodules.len() { bail!(internal_error(format!( "Lexed module has \"{}\" submodules, and typed module has \"{}\" submodules.", lexed_submodules.len(), ty_submodules.len(), ))); } // The order of submodules is not guaranteed to be the same, hence, sorting by name to // ensure the same ordering. 
lexed_submodules.sort_by(|a, b| a.0.cmp(&b.0)); ty_submodules.sort_by(|a, b| a.0.cmp(&b.0)); let lexed_submodules = lexed_submodules.__iter(); let ty_submodules = ty_submodules.iter(); for (lexed_submodule, ty_submodule) in lexed_submodules.zip(ty_submodules) { if lexed_submodule.0 != ty_submodule.0 { bail!(internal_error(format!( "Lexed module \"{}\" does not match with the typed module \"{}\".", lexed_submodule.0, ty_submodule.0, ))); } visit_modules_rec( engines, __ref([lexed_submodule.1.module]), &ty_submodule.1.module, dry_run, visitor, result, )?; } Ok(()) } let mut result = vec![]; visit_modules_rec( engines, lexed_module, ty_module, dry_run, visitor, &mut result, )?; Ok(result) } /// Registered [MigrationStep]s. pub(crate) type MigrationSteps = &'static [(Feature, &'static [MigrationStep])]; /// Keeps the number of occurrences of each [MigrationStep] /// after the analysis is executed. pub(crate) type MigrationStepsWithOccurrences<'a> = &'a [(Feature, Vec<(&'a MigrationStep, Option<usize>)>)]; /// Returns a non-empty set of consistent migration steps. /// /// All the CLI commands require at least one migration step. /// This macro conveniently short-circuits and returns, /// if there are no migration steps defined. /// /// Panics if the migration steps are not consistent. #[macro_export] macro_rules! get_migration_steps_or_return { () => {{ let migration_steps = $crate::migrations::get_migration_steps(); if migration_steps.is_empty() { println!("There are currently no migration steps defined for the upcoming breaking change version of Sway."); return Ok(()); } migration_steps }}; } pub(crate) fn get_migration_steps() -> MigrationSteps { assert_migration_steps_consistency(MIGRATION_STEPS); MIGRATION_STEPS } /// Panics if the migration steps are not consistent. fn assert_migration_steps_consistency(migration_steps: MigrationSteps) { if migration_steps.is_empty() { return; } // Each experimental feature can appear only once in the migration steps. 
let num_of_features_in_migration_steps = migration_steps.len(); let num_of_unique_features_in_migration_steps = migration_steps .iter() .map(|(feature, _)| feature) .collect::<HashSet<_>>() .len(); if num_of_features_in_migration_steps != num_of_unique_features_in_migration_steps { panic!("Inconsistent migration steps: each experimental feature can appear only once in the migration steps."); } // Migration step titles must be unique. let num_of_migration_steps = migration_steps .iter() .map(|(_, steps)| steps.len()) .sum::<usize>(); let num_of_migration_steps_with_unique_title = migration_steps .iter() .flat_map(|(_, steps)| steps.iter().map(|step| step.title)) .collect::<HashSet<_>>() .len(); if num_of_migration_steps != num_of_migration_steps_with_unique_title { panic!("Inconsistent migration steps: migration step titles must be unique."); } // Only fully automatic steps can have duration set to zero. let has_non_automatic_steps_with_zero_duration = migration_steps .iter() .flat_map(|(_, steps)| { steps.iter().map(|step| { ( matches!(step.execution(), MigrationStepExecution::Automatic), step.duration, ) }) }) .any(|(is_automatic, duration)| !is_automatic && duration == 0); if has_non_automatic_steps_with_zero_duration { panic!("Inconsistent migration steps: only fully automatic steps can have duration set to zero."); } } /* ------------------------------ Migration Steps ------------------------------- Below are the actual migration steps. Change those steps for every new breaking change version of Sway, by removing the previous steps and adding the ones relevant for the next breaking change version. */ /// The list of the migration steps, grouped by the Sway feature that causes /// the breaking changes behind the migration steps. const MIGRATION_STEPS: MigrationSteps = &[( Feature::NewHashing, &[new_hashing::REVIEW_EXISTING_USAGES_OF_STORAGE_MAP_SHA256_AND_KECCAK256], )];
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/migrations/demo.rs
forc-plugins/forc-migrate/src/migrations/demo.rs
//! This module contains demo migrations used for learning and testing the migration tool. #![allow(deprecated)] use std::vec; use crate::{ internal_error, matching::{lexed_match, lexed_match_mut, with_name, with_name_mut}, migrations::{visit_all_modules_mut, MutProgramInfo, Occurrence}, modifying::*, }; use anyhow::{bail, Ok, Result}; use sway_ast::Module; use sway_core::{language::ty::TyModule, Engines}; use sway_types::{Span, Spanned}; use super::{ContinueMigrationProcess, DryRun, MigrationStep, MigrationStepKind}; #[allow(dead_code)] pub(super) const INSERT_EMPTY_FUNCTION_STEP: MigrationStep = MigrationStep { title: "Insert `empty_function` at the end of every module", duration: 0, kind: MigrationStepKind::CodeModification( insert_empty_function_step, &[], ContinueMigrationProcess::IfNoManualMigrationActionsNeeded, ), help: &[ "Migration will insert an empty function named `empty_function` at the end of", "every module.", " ", "E.g., `fn empty_function() {}`.", " ", "If a function with that name already exists in the module, it will be", "renamed to `empty_function_old`, and a new one will be inserted.", " ", "If both functions already exist, the migration does not do anything.", ], }; fn insert_empty_function_step( program_info: &mut MutProgramInfo, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { fn insert_empty_function_step_impl( _engines: &Engines, module: &mut Module, _ty_module: &TyModule, dry_run: DryRun, ) -> Result<Vec<Occurrence>> { let mut result = vec![]; let existing_empty_function = lexed_match::functions(module, with_name("empty_function")).next(); let existing_empty_old_function = lexed_match::functions(module, with_name("empty_function_old")).next(); // If the module is empty, in the report, point at the module kind // (`contract`, `script`, `predicate`, or `library`), otherwise, // point at the last item. 
let report_span = match module.items.last() { Some(annotated_item) => annotated_item.span(), None => module.semicolon_token.span(), }; match (existing_empty_function, existing_empty_old_function) { (Some(_), Some(_)) => { // Code transformations must be idempotent. In this demo, if both functions // already exist, we don't do anything. return Ok(vec![]); } (Some(_), None) => { // `empty_function` exists, but old do not. // Rename the existing `empty_function` to `empty_function_old`, and insert a new `empty_function`. // We report the occurrence of the code relevant for migration... result.push(report_span.clone().into()); // ...and proceed with the code change only if it is not a dry-run. if dry_run == DryRun::Yes { return Ok(result); } let Some(existing_empty_function) = lexed_match_mut::functions(module, with_name_mut("empty_function")).next() else { bail!(internal_error("Existing `empty_function` cannot be found.")); }; modify(existing_empty_function).set_name("empty_function_old"); let insert_span = Span::empty_at_end(&report_span); let empty_function = New::function(insert_span, "empty_function"); modify(module).append_function(empty_function); } (None, _) => { // `empty_function` does not exist, create a new one. result.push(report_span.clone().into()); if dry_run == DryRun::Yes { return Ok(result); } let insert_span = Span::empty_at_end(&report_span); let empty_function = New::function(insert_span, "empty_function"); modify(module).append_function(empty_function); } } Ok(result) } let res = visit_all_modules_mut(program_info, dry_run, insert_empty_function_step_impl)?; Ok(res.into_iter().flatten().collect()) }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/matching/typed_tree.rs
forc-plugins/forc-migrate/src/matching/typed_tree.rs
//! This module contains helper functions for matching elements within a typed program. use super::{any, TyElementsMatcher, TyElementsMatcherDeep, TyLocate}; use sway_ast::StorageField; use sway_core::{ decl_engine::id::DeclId, language::ty::{ TyAstNodeContent, TyDecl, TyImplSelfOrTrait, TyModule, TyProgram, TyStorageDecl, TyStorageField, }, }; use sway_types::Spanned; impl TyElementsMatcher<DeclId<TyStorageDecl>> for TyProgram { fn match_elems<'a, F>(&'a self, predicate: F) -> impl Iterator<Item = &'a DeclId<TyStorageDecl>> where F: Fn(&&'a DeclId<TyStorageDecl>) -> bool + Clone + 'a, DeclId<TyStorageDecl>: 'a, { // Storage can be declared only in the root of a contract. self.root_module.match_elems(predicate) } } impl TyElementsMatcher<DeclId<TyStorageDecl>> for TyModule { fn match_elems<'a, F>(&'a self, predicate: F) -> impl Iterator<Item = &'a DeclId<TyStorageDecl>> where F: Fn(&&'a DeclId<TyStorageDecl>) -> bool + Clone + 'a, DeclId<TyStorageDecl>: 'a, { self.all_nodes .iter() .filter_map(move |decl| match &decl.content { TyAstNodeContent::Declaration(TyDecl::StorageDecl(storage_decl)) => { if predicate(&&storage_decl.decl_id) { Some(&storage_decl.decl_id) } else { None } } _ => None, }) } } impl TyElementsMatcher<TyStorageField> for TyStorageDecl { fn match_elems<'a, F>(&'a self, predicate: F) -> impl Iterator<Item = &'a TyStorageField> where F: Fn(&&'a TyStorageField) -> bool + Clone + 'a, TyStorageField: 'a, { self.fields .iter() // In the `TyStorageDecl`, all the fields are flattened. // But we need to preserve the semantics of non-deep matching // and return only those that are directly under the storage. 
.filter(|sf| sf.full_name().starts_with("storage.")) .filter(predicate) } } impl TyElementsMatcherDeep<TyStorageField> for TyStorageDecl { fn match_elems_deep<'a, F>(&'a self, predicate: F) -> Vec<&'a TyStorageField> where F: Fn(&&'a TyStorageField) -> bool + Clone + 'a, TyStorageField: 'a, { self.fields.iter().filter(predicate).collect() } } impl TyElementsMatcher<DeclId<TyImplSelfOrTrait>> for TyModule { fn match_elems<'a, F>( &'a self, predicate: F, ) -> impl Iterator<Item = &'a DeclId<TyImplSelfOrTrait>> where F: Fn(&&'a DeclId<TyImplSelfOrTrait>) -> bool + Clone + 'a, DeclId<TyImplSelfOrTrait>: 'a, { self.all_nodes .iter() .filter_map(|node| match &node.content { TyAstNodeContent::Declaration(TyDecl::ImplSelfOrTrait(decl)) => Some(&decl.decl_id), _ => None, }) .filter(predicate) } } impl TyLocate<StorageField, TyStorageField> for TyStorageDecl { fn locate(&self, lexed_element: &StorageField) -> Option<&TyStorageField> { self.fields .iter() .find(|field| field.name.span() == lexed_element.name.span()) } } pub mod matchers { use super::*; pub(crate) fn storage_decl<P>(parent: &P) -> Option<DeclId<TyStorageDecl>> where P: TyElementsMatcher<DeclId<TyStorageDecl>>, { parent.match_elems(any).next().copied() } #[allow(dead_code)] pub(crate) fn storage_fields<'a, P, F>( parent: &'a P, predicate: F, ) -> impl Iterator<Item = &'a TyStorageField> where F: Fn(&&'a TyStorageField) -> bool + Clone + 'a, P: TyElementsMatcher<TyStorageField>, { parent.match_elems(predicate) } pub(crate) fn storage_fields_deep<'a, S, F>( scope: &'a S, predicate: F, ) -> Vec<&'a TyStorageField> where F: Fn(&&'a TyStorageField) -> bool + Clone + 'a, S: TyElementsMatcherDeep<TyStorageField>, { scope.match_elems_deep(predicate) } pub(crate) fn impl_self_or_trait_decls<S>( scope: &S, ) -> impl Iterator<Item = &'_ DeclId<TyImplSelfOrTrait>> where S: TyElementsMatcher<DeclId<TyImplSelfOrTrait>>, { scope.match_elems(any) } } pub mod predicates { pub mod ty_storage_field { use super::super::*; 
pub(crate) fn with_in_keyword(storage_field: &&TyStorageField) -> bool { storage_field.key_expression.is_some() } pub(crate) fn without_in_keyword(storage_field: &&TyStorageField) -> bool { storage_field.key_expression.is_none() } } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/matching/mod.rs
forc-plugins/forc-migrate/src/matching/mod.rs
#![allow(dead_code)] //! This module contains common API for matching elements //! within a lexed or a typed tree. //! //! A typical migration will search for certain elements in the //! lexed or typed tree and modify them within the lexed tree. //! //! In the long term we want to have advanced infrastructure for both //! matching and modifying parts of the trees, as discussed in //! https://github.com/FuelLabs/sway/issues/6836. //! //! Currently, we will start (very) small, by providing reusable //! module functions for matching parts of the trees. //! //! For concrete examples, see the match functions and trait impls //! implemented in the sub-modules. //! //! ## Design decisions //! //! The goal was pragmatic. To create a simple to develop and extend API that //! will offer easy discoverability of provided functions and methods, all in //! order to move cumbersome and error-prone matching code out of the migration //! logic. //! //! Note that, although similar to static analysis tools like, e.g. Rust's //! [Clippy](https://doc.rust-lang.org/clippy/), `forc migrate` is significantly //! different. Instead of providing hundreds of independent lints that //! automatically check for localized issues, migrations provide only a handful //! of migration steps, that are orchestrated within a single migration process, //! some of them possibly being interactive. //! //! Each migration step, in general, wants to take a look at a larger scope at a time, //! often a module. This makes a typical approach, of using fine-grain visitor functions //! less applicable. Also, the goal is to empower non-compiler developers to write //! migrations. //! //! All this led to the design in which a single migration step is in focus, and can: //! - search for elements of interest using the match functions, //! - build new and modify existing lexed elements using the [super::modifying], //! //! Migrations will use match functions to either search directly //! 
within a parent or recursively (deep) within a scope. Match functions can //! accept predicates to filter the searched elements. The predicates deliberately //! accept `&&TElement` or `&&mut TElement` so that can be easily passed to //! [Iterator::filter] function. //! //! For the cases when migrations do target individual expressions, and do not need //! to inspect a larger scope, the visitor pattern is still supported and available //! via the tree visitors that are defined in [super::visiting]. //! //! ## Matching elements in trees //! //! Functions matching on lexed trees are coming in two variants, immutable and mutable. //! They differ in the mutability of their arguments and returned types, but //! otherwise implement the same matching logic. //! //! Matching can be done either directly within a parent, or recursively //! within a scope. E.g., we can match for `StorageField`s that are //! directly under the `storage` declaration, or for all `StorageField`s //! that are in the `storage` declaration, in any of the namespaces, //! recursively. //! //! Searching for elements "in-between", e.g., `StorageField`s in a particular //! sub-namespace, is currently not supported, and must be done manually //! within a migration. //! //! Matching is done on lexical or typed elements like, e.g., `StorageField`, //! or `TyStorageField`, without any more convenient abstraction provided for //! matching. This is also a simple beginning. A better matching framework //! would expose a stable higher level abstraction for matching and modifying. //! //! ## Locating equivalent elements across trees //! //! Often we will find an element in the lexed tree, e.g., a `StorageField` in //! order to change it, but will need additional information from its typed tree //! counterpart, `TyStorageField`, or vice versa. The [TyLocate] trait offers //! the [TyLocate::locate] method for finding a typed equivalent of a lexed //! element. The [LexedLocate] and [LexedLocateMut] do the opposite. 
//! //! Locating an equivalent will in most of the cases be implemented via equality //! of spans. Locating can also cause multiple traversals of the same part of //! a tree. For migrations, this will not cause a performance problem. mod lexed_tree; mod typed_tree; use sway_ast::attribute::{Annotated, Attribute, AttributeArg}; use sway_ast::{AttributeDecl, ItemFn, PathType}; pub(crate) use typed_tree::matchers as ty_match; pub(crate) use typed_tree::predicates::*; pub(crate) use lexed_tree::matchers as lexed_match; pub(crate) use lexed_tree::matchers_mut as lexed_match_mut; pub(crate) use lexed_tree::predicates::*; /// Matches for typed tree elements of type `T` located **directly** within /// the typed tree element `self`. /// /// The matched elements must satisfy the `predicate`. pub(crate) trait TyElementsMatcher<T> { fn match_elems<'a, P>(&'a self, predicate: P) -> impl Iterator<Item = &'a T> where P: Fn(&&'a T) -> bool + Clone + 'a, T: 'a; } /// Matches for typed tree elements of type `T` located **recursively** within /// the typed tree element `self` or any of its children. The meaning of a /// "child" depends on the exact tree element `self`. /// /// The matched elements must satisfy the `predicate`. pub(crate) trait TyElementsMatcherDeep<T> { fn match_elems_deep<'a, F>(&'a self, predicate: F) -> Vec<&'a T> where F: Fn(&&'a T) -> bool + Clone + 'a, T: 'a; } /// Within a typed tree element `self`, locates and returns the element of type `Ty`, /// that is the typed equivalent of the `lexed_element`. pub(crate) trait TyLocate<Lexed, Ty> { fn locate(&self, lexed_element: &Lexed) -> Option<&Ty>; } /// Matches for lexed tree elements of type `T` located **directly** within /// the lexed tree element `self`. /// /// The matched elements must satisfy the `predicate`. 
pub(crate) trait LexedElementsMatcherMut<T> { fn match_elems_mut<'a, F>(&'a mut self, predicate: F) -> impl Iterator<Item = &'a mut T> where F: Fn(&&'a mut T) -> bool + Clone + 'a, T: 'a; } pub(crate) trait LexedElementsMatcher<T> { fn match_elems<'a, F>(&'a self, predicate: F) -> impl Iterator<Item = &'a T> where F: Fn(&&'a T) -> bool + Clone + 'a, T: 'a; } /// Matches for lexed tree elements of type `T` located **recursively** within /// the lexed tree element `self` or any of its children. The meaning of a /// "child" depends on the exact tree element `self`. /// /// The matched elements must satisfy the `predicate`. pub(crate) trait LexedElementsMatcherDeepMut<T> { fn match_elems_deep_mut<'a, F>(&'a mut self, predicate: F) -> Vec<&'a mut T> where F: Fn(&&'a mut T) -> bool + Clone + 'a, T: 'a; } pub(crate) trait LexedElementsMatcherDeep<T> { fn match_elems_deep<'a, F>(&'a self, predicate: F) -> Vec<&'a T> where F: Fn(&&'a T) -> bool + Clone + 'a, T: 'a; } /// Within a lexed tree element `self`, locates and returns the element of type `Lexed`, /// that is the lexed equivalent of the `ty_element`. pub(crate) trait LexedLocateMut<Ty, Lexed> { fn locate_mut(&mut self, ty_element: &Ty) -> Option<&mut Lexed>; } /// Within a lexed tree element `self`, locates and returns the element of type `Lexed`, /// that is the lexed equivalent of the `ty_element`. pub(crate) trait LexedLocate<Ty, Lexed> { fn locate(&self, ty_element: &Ty) -> Option<&Lexed>; } /// Within a lexed tree element `self`, locates and returns the element of type `Lexed`, /// that is the lexed equivalent of the `ty_element`, together with its annotations. pub(crate) trait LexedLocateAnnotatedMut<Ty, Lexed> { fn locate_annotated_mut<'a>( &'a mut self, ty_element: &Ty, ) -> Option<(&'a mut Vec<AttributeDecl>, &'a mut Lexed)>; } /// Within a lexed tree element `self`, locates and returns the element of type `Lexed`, /// that is the lexed equivalent of the `ty_element`, together with its annotations. 
pub(crate) trait LexedLocateAnnotated<Ty, Lexed> { fn locate_annotated<'a>( &'a self, ty_element: &Ty, ) -> Option<(&'a Vec<AttributeDecl>, &'a Lexed)>; } /// Within an annotated lexed tree element `self`, locates and returns the element of type `LexedAnnotated`, /// that is the annotated lexed equivalent of the `ty_element`. pub(crate) trait LexedLocateAsAnnotatedMut<Ty, LexedAnnotated> { fn locate_as_annotated_mut( &mut self, ty_element: &Ty, ) -> Option<&mut Annotated<LexedAnnotated>>; } /// Within an annotated lexed tree element `self`, locates and returns the element of type `LexedAnnotated`, /// that is the annotated lexed equivalent of the `ty_element`. pub(crate) trait LexedLocateAsAnnotated<Ty, LexedAnnotated> { fn locate_as_annotated(&self, ty_element: &Ty) -> Option<&Annotated<LexedAnnotated>>; } impl<T, Ty, Lexed> LexedLocateMut<Ty, Lexed> for T where T: LexedLocateAnnotatedMut<Ty, Lexed>, { fn locate_mut(&mut self, ty_element: &Ty) -> Option<&mut Lexed> { self.locate_annotated_mut(ty_element) .map(|annotated| annotated.1) } } impl<T, Ty, Lexed> LexedLocate<Ty, Lexed> for T where T: LexedLocateAnnotated<Ty, Lexed>, { fn locate(&self, ty_element: &Ty) -> Option<&Lexed> { self.locate_annotated(ty_element) .map(|annotated| annotated.1) } } /// A predicate that returns true for any immutable input. pub(crate) fn any<T>(_t: &&T) -> bool { true } /// A predicate that returns true for any mutable input. pub(crate) fn any_mut<T>(_t: &&mut T) -> bool { true } /// Returns a predicate that evaluates to true if all the predicates passed /// as arguments evaluate to true. #[macro_export] macro_rules! all_of { ($($i:expr),+) => { $crate::matching::all_of([$($i, )*].as_slice()) }; } /// Returns a predicate that evaluates to true if all the `predicates` /// evaluate to true. /// /// Not intended to be used directly. Use [all_of!] macro instead. 
#[allow(dead_code)] pub(crate) fn all_of<T, P>(predicates: &[P]) -> impl Fn(&&T) -> bool + Clone + '_ where P: Fn(&&T) -> bool + Clone, { move |t: &&T| { let mut res = true; for predicate in predicates { res &= predicate(t); } res } } /// Returns a predicate that evaluates to true if all the predicates passed /// as arguments evaluate to true. #[macro_export] macro_rules! all_of_mut { ($($i:expr),+) => { $crate::matching::all_of_mut([$($i, )*].as_slice()) }; } /// Returns a predicate that evaluates to true if all the `predicates` /// evaluate to true. /// /// Not intended to be used directly. Use [all_of_mut!] macro instead. #[allow(dead_code)] pub(crate) fn all_of_mut<T, P>(predicates: &[P]) -> impl Fn(&&mut T) -> bool + Clone + '_ where P: Fn(&&mut T) -> bool + Clone, { move |t: &&mut T| { let mut res = true; for predicate in predicates { res &= predicate(t); } res } } /// Returns a predicate that evaluates to true if any of the predicates passed /// as arguments evaluate to true. #[macro_export] macro_rules! any_of { ($($i:expr),+) => { $crate::matching::any_of([$($i, )*].as_slice()) }; } /// Returns a predicate that evaluates to true if any of the `predicates` /// evaluate to true. /// /// Not intended to be used directly. Use [any_of!] macro instead. #[allow(dead_code)] pub(crate) fn any_of<T, P>(predicates: &[P]) -> impl Fn(&&T) -> bool + Clone + '_ where P: Fn(&&T) -> bool + Clone, { move |t: &&T| { let mut res = false; for predicate in predicates { res |= predicate(t); } res } } /// Returns a predicate that evaluates to true if any of the predicates passed /// as arguments evaluate to true. #[macro_export] macro_rules! any_of_mut { ($($i:expr),+) => { $crate::matching::any_of_mut([$($i, )*].as_slice()) }; } /// Returns a predicate that evaluates to true if any of the `predicates` /// evaluate to true. /// /// Not intended to be used directly. Use [any_of_mut!] macro instead. 
#[allow(dead_code)] pub(crate) fn any_of_mut<T, P>(predicates: &[P]) -> impl Fn(&&mut T) -> bool + Clone + '_ where P: Fn(&&mut T) -> bool + Clone, { move |t: &&mut T| { let mut res = false; for predicate in predicates { res |= predicate(t); } res } } /// Trait for inspecting if a tree element has the expected name. pub(crate) trait WithName { /// Returns true if `Self` has the name `name`. fn with_name<N: AsRef<str> + ?Sized>(&self, name: &N) -> bool; } /// Returns a predicate that evaluates to true if a [WithName] /// implementor has the name equal to `name`. pub(crate) fn with_name<T, N>(name: &N) -> impl Fn(&&T) -> bool + Clone + '_ where T: WithName, N: AsRef<str> + ?Sized, { move |t: &&T| t.with_name(name) } /// Returns a predicate that evaluates to true if a [WithName] /// implementor has the name equal to `name`. pub(crate) fn with_name_mut<T, N>(name: &N) -> impl Fn(&&mut T) -> bool + Clone + '_ where T: WithName, N: AsRef<str> + ?Sized, { move |t: &&mut T| t.with_name(name) } impl WithName for Attribute { fn with_name<N: AsRef<str> + ?Sized>(&self, name: &N) -> bool { self.name.as_str() == name.as_ref() } } impl WithName for AttributeArg { fn with_name<N: AsRef<str> + ?Sized>(&self, name: &N) -> bool { self.name.as_str() == name.as_ref() } } impl WithName for PathType { fn with_name<N: AsRef<str> + ?Sized>(&self, name: &N) -> bool { self.last_segment().name.as_str() == name.as_ref() } } impl WithName for ItemFn { fn with_name<N: AsRef<str> + ?Sized>(&self, name: &N) -> bool { self.fn_signature.name.as_str() == name.as_ref() } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/matching/lexed_tree.rs
forc-plugins/forc-migrate/src/matching/lexed_tree.rs
//! This module contains helper functions for matching elements within a mutable or immutable lexed tree.
//! Functions are grouped in two submodules, [self::matchers] and [self::matchers_mut]. Both modules
//! contain the same functions, that differ only in the mutability of their arguments and returned types.

use super::*;

use duplicate::duplicate_item;
use sway_ast::{
    attribute::Annotated, ItemImpl, ItemKind, ItemStorage, Module, StorageEntry, StorageField,
};
use sway_ast::{Literal, PathType};
use sway_core::language::{
    lexed::{LexedModule, LexedProgram},
    ty::TyImplSelfOrTrait,
};
use sway_types::Spanned;

// To avoid extensive code duplication, the `duplicate_item` macro is used.
// When adding new matchers, the proposed and simplest approach is the following:
// - implement either a mutable or immutable version, as needed in the concrete new migration step.
// - keep the new matcher function or trait implementation at first out of the `matchers/_mut` modules
//   and use it in the concrete migration step directly.
// - once properly tested, move the function or trait implementation inside of the `__mod_name`
//   and perform the replacements of used identifiers. E.g., replace every occurrence of `iter` or
//   `iter_mut` with `__iter`.

// We need to specify `self` explicitly so that `duplicate_item` can produce
// both variants: `self: &'a Self` and `self: &'a mut Self`.
#[allow(clippy::needless_arbitrary_self_type)]
// We need to specify `'a` explicitly to be able to specify template implementation
// that will work both for immutable and mutable case.
#[allow(clippy::needless_lifetimes)]
#[duplicate_item(
    // Module name, `matchers` or `matchers_mut`.
    __mod_name
    // Traits to implement.
    __ElementsMatcher __ElementsMatcherDeep __LocateAnnotated __LocateAsAnnotated
    // Trait methods.
    __match_elems __match_elems_deep __locate_annotated __locate_as_annotated
    // Common implementation elements, e.g. functions like `iter().`
    __ref_type(type) __ref_mut(value) __ref(value) __iter __as_ref_mut __any;

    // Substitutions for the immutable variant (`matchers`).
    [matchers]
    [LexedElementsMatcher] [LexedElementsMatcherDeep] [LexedLocateAnnotated] [LexedLocateAsAnnotated]
    [match_elems] [match_elems_deep] [locate_annotated] [locate_as_annotated]
    [&'a type] [value] [&value] [iter] [as_ref] [any];

    // Substitutions for the mutable variant (`matchers_mut`).
    [matchers_mut]
    [LexedElementsMatcherMut] [LexedElementsMatcherDeepMut] [LexedLocateAnnotatedMut] [LexedLocateAsAnnotatedMut]
    [match_elems_mut] [match_elems_deep_mut] [locate_annotated_mut] [locate_as_annotated_mut]
    [&'a mut type] [ref mut value] [&mut value] [iter_mut] [as_mut] [any_mut];
)]
#[allow(dead_code)]
pub mod __mod_name {
    use super::*;

    // Matches free functions declared directly in a module.
    impl __ElementsMatcher<ItemFn> for Module {
        fn __match_elems<'a, F>(
            self: __ref_type([Self]),
            predicate: F,
        ) -> impl Iterator<Item = __ref_type([ItemFn])>
        where
            F: Fn(&__ref_type([ItemFn])) -> bool + Clone + 'a,
            ItemFn: 'a,
        {
            self.items
                .__iter()
                .map(|annotated| __ref([annotated.value]))
                .filter_map(|decl| match decl {
                    sway_ast::ItemKind::Fn(module_fn) => Some(module_fn),
                    _ => None,
                })
                .filter(predicate)
        }
    }

    impl __ElementsMatcher<ItemStorage> for LexedProgram {
        fn __match_elems<'a, F>(
            self: __ref_type([Self]),
            predicate: F,
        ) -> impl Iterator<Item = __ref_type([ItemStorage])>
        where
            F: Fn(&__ref_type([ItemStorage])) -> bool + Clone + 'a,
            ItemStorage: 'a,
        {
            // Storage can be declared only in the root module of a contract.
            self.root.__match_elems(predicate)
        }
    }

    impl __ElementsMatcher<ItemStorage> for LexedModule {
        fn __match_elems<'a, F>(
            self: __ref_type([Self]),
            predicate: F,
        ) -> impl Iterator<Item = __ref_type([ItemStorage])>
        where
            F: Fn(&__ref_type([ItemStorage])) -> bool + Clone + 'a,
            ItemStorage: 'a,
        {
            self.tree
                .value
                .items
                .__iter()
                .map(|annotated_item| __ref([annotated_item.value]))
                .filter_map(move |decl| match decl {
                    ItemKind::Storage(__ref_mut([item_storage])) => {
                        if predicate(&item_storage) {
                            Some(item_storage)
                        } else {
                            None
                        }
                    }
                    _ => None,
                })
        }
    }

    // Matches only the direct (top-level) fields of a storage declaration.
    // For fields nested in namespaces, see `__ElementsMatcherDeep` below.
    impl __ElementsMatcher<StorageField> for ItemStorage {
        fn __match_elems<'a, F>(
            self: __ref_type([Self]),
            predicate: F,
        ) -> impl Iterator<Item = __ref_type([StorageField])>
        where
            F: Fn(&__ref_type([StorageField])) -> bool + Clone + 'a,
            StorageField: 'a,
        {
            self.entries
                .inner
                .__iter()
                .map(|annotated_item| __ref([annotated_item.value]))
                .filter_map(move |storage_entry| {
                    storage_entry
                        .field
                        .__as_ref_mut()
                        .filter(|sf| predicate(sf))
                })
        }
    }

    // Matches storage fields recursively, descending into storage namespaces.
    impl __ElementsMatcherDeep<StorageField> for ItemStorage {
        fn __match_elems_deep<'a, F>(
            self: __ref_type([Self]),
            predicate: F,
        ) -> Vec<__ref_type([StorageField])>
        where
            F: Fn(&__ref_type([StorageField])) -> bool + Clone + 'a,
            StorageField: 'a,
        {
            // Pushes the entry's own field (if it satisfies the `predicate`)
            // and then recurses into the entry's namespace, if any.
            fn recursively_collect_storage_fields_in_storage_entry<'a, P>(
                result: &mut Vec<__ref_type([StorageField])>,
                predicate: P,
                storage_entry: __ref_type([StorageEntry]),
            ) where
                P: Fn(&__ref_type([StorageField])) -> bool + Clone + 'a,
            {
                if let Some(sf) = __ref([storage_entry.field]) {
                    if predicate(&sf) {
                        result.push(sf)
                    }
                }

                if let Some(namespace) = __ref([storage_entry.namespace]) {
                    namespace
                        .inner
                        .__iter()
                        .map(|annotated_item| __ref([annotated_item.value]))
                        .for_each(|storage_entry| {
                            recursively_collect_storage_fields_in_storage_entry(
                                result,
                                predicate.clone(),
                                storage_entry.__as_ref_mut(),
                            )
                        });
                }
            }

            let mut result = vec![];
            self.entries
                .inner
                .__iter()
                .map(|annotated_item| __ref([annotated_item.value]))
                .for_each(|storage_entry| {
                    recursively_collect_storage_fields_in_storage_entry(
                        &mut result,
                        predicate.clone(),
                        storage_entry,
                    )
                });

            result
        }
    }

    // Matches module items together with their attributes.
    impl __ElementsMatcher<Annotated<ItemKind>> for Module {
        fn __match_elems<'a, F>(
            self: __ref_type([Self]),
            predicate: F,
        ) -> impl Iterator<Item = __ref_type([Annotated<ItemKind>])>
        where
            F: Fn(&__ref_type([Annotated<ItemKind>])) -> bool + Clone + 'a,
            Annotated<ItemKind>: 'a,
        {
            self.items.__iter().filter(predicate)
        }
    }

    // Matches trait constraints (as paths) in the `where` clause of an `impl`.
    impl __ElementsMatcher<PathType> for ItemImpl {
        fn __match_elems<'a, F>(
            self: __ref_type([Self]),
            predicate: F,
        ) -> impl Iterator<Item = __ref_type([PathType])>
        where
            F: Fn(&__ref_type([PathType])) -> bool + Clone + 'a,
            PathType: 'a,
        {
            self.where_clause_opt
                .__iter()
                .flat_map(|where_clause| where_clause.bounds.__iter())
                .flat_map(move |bound| bound.bounds.__iter().filter(predicate.clone()))
        }
    }

    // Locates the lexed `impl` that corresponds to a typed `impl` declaration,
    // returning its attributes alongside. Matching is done by span equality.
    impl __LocateAnnotated<TyImplSelfOrTrait, ItemImpl> for Module {
        fn __locate_annotated<'a>(
            self: __ref_type([Self]),
            ty_element: &TyImplSelfOrTrait,
        ) -> Option<(__ref_type([Vec<AttributeDecl>]), __ref_type([ItemImpl]))> {
            self.items
                .__iter()
                .filter_map(|annotated| match __ref([annotated.value]) {
                    ItemKind::Impl(item_impl) => Some((__ref([annotated.attributes]), item_impl)),
                    _ => None,
                })
                .find(|(_attributes, item_impl)| item_impl.span() == ty_element.span)
        }
    }

    // As above, but returns the whole annotated item instead of splitting
    // it into attributes and `impl`.
    impl __LocateAsAnnotated<TyImplSelfOrTrait, ItemKind> for Module {
        fn __locate_as_annotated<'a>(
            self: __ref_type([Self]),
            ty_element: &TyImplSelfOrTrait,
        ) -> Option<__ref_type([Annotated<ItemKind>])> {
            self.items
                .__iter()
                .find(|annotated| match &annotated.value {
                    ItemKind::Impl(item_impl) => item_impl.span() == ty_element.span,
                    _ => false,
                })
        }
    }

    use sway_ast::{
        attribute::{Attribute, AttributeArg},
        AttributeDecl, CommaToken, Parens, Punctuated,
    };

    /// Returns the storage declaration found in the `parent`, if any.
    pub(crate) fn storage_decl<'a, P>(parent: __ref_type([P])) -> Option<__ref_type([ItemStorage])>
    where
        P: __ElementsMatcher<ItemStorage>,
    {
        parent.__match_elems(__any).next()
    }

    /// Returns all direct storage fields of the `parent` that satisfy the `predicate`.
    pub(crate) fn storage_fields<'a, P, F>(
        parent: __ref_type([P]),
        predicate: F,
    ) -> impl Iterator<Item = __ref_type([StorageField])>
    where
        F: Fn(&__ref_type([StorageField])) -> bool + Clone + 'a,
        P: __ElementsMatcher<StorageField>,
    {
        parent.__match_elems(predicate)
    }

    /// Returns all storage fields in the `scope`, including fields nested
    /// in storage namespaces, that satisfy the `predicate`.
    pub(crate) fn storage_fields_deep<'a, S, F>(
        scope: __ref_type([S]),
        predicate: F,
    ) -> Vec<__ref_type([StorageField])>
    where
        F: Fn(&__ref_type([StorageField])) -> bool + Clone + 'a,
        S: __ElementsMatcherDeep<StorageField>,
    {
        scope.__match_elems_deep(predicate)
    }

    /// Returns all attributes in `attributes` that satisfy the `predicate`,
    /// flattening multiple attributes declared in a single [AttributeDecl].
    pub(crate) fn attributes<'a, F>(
        attributes: __ref_type([[AttributeDecl]]),
        predicate: F,
    ) -> impl Iterator<Item = __ref_type([Attribute])>
    where
        F: Fn(&__ref_type([Attribute])) -> bool + Clone + 'a,
    {
        attributes
            .__iter()
            .flat_map(|attr| attr.attribute.inner.__iter())
            .filter(predicate)
    }

    /// Returns all `cfg` attributes found in `attributes`.
    pub(crate) fn cfg_attributes<'a>(
        attributes: __ref_type([[AttributeDecl]]),
    ) -> impl Iterator<Item = __ref_type([Attribute])> {
        attributes
            .__iter()
            .flat_map(|attr| attr.attribute.inner.__iter())
            .filter(|attr| attr.is_cfg())
    }

    /// Returns all `cfg` attributes that act as only attribute within
    /// an [AttributeDecl] and have exactly one argument.
    ///
    /// E.g.:
    /// - `#[cfg(experimental_feature = true)]` will be returned,
    /// - `#[cfg(experimental_feature = true, experimental_other_feature = false)]` will not,
    /// - `#[test, cfg(experimental_feature = true)]` will also not be returned.
    pub(crate) fn cfg_attributes_standalone_single_arg<'a>(
        attributes: __ref_type([[AttributeDecl]]),
    ) -> impl Iterator<Item = __ref_type([Attribute])> {
        attributes
            .__iter()
            .filter(|attr| attr.attribute.inner.iter().count() == 1)
            .flat_map(|attr| attr.attribute.inner.__iter())
            .filter(|attr| attr.is_cfg())
            .filter(|attr| {
                attr.args
                    .as_ref()
                    .is_some_and(|args| args.inner.iter().count() == 1)
            })
    }

    /// Returns the first [AttributeArg] of the first occurrence of a `cfg` attribute within `attributes`,
    /// that satisfies the `predicate`.
    pub(crate) fn cfg_attribute_arg<'a, F>(
        attributes: __ref_type([[AttributeDecl]]),
        predicate: F,
    ) -> Option<__ref_type([AttributeArg])>
    where
        F: Fn(&__ref_type([AttributeArg])) -> bool + Clone + 'a,
    {
        for cfg_attribute in cfg_attributes(attributes) {
            match cfg_attribute.args.__as_ref_mut() {
                Some(args) => match attribute_arg(args, predicate.clone()) {
                    Some(arg) => return Some(arg),
                    None => continue,
                },
                None => continue,
            }
        }

        None
    }

    /// Returns the first `cfg` [Attribute] that act as only attribute within
    /// an [AttributeDecl] and have exactly one argument that satisfies the `predicate`.
    pub(crate) fn cfg_attribute_standalone_single_arg<'a, N, F>(
        attributes: __ref_type([[AttributeDecl]]),
        arg_name: &N,
        arg_val_predicate: F,
    ) -> Option<__ref_type([Attribute])>
    where
        N: AsRef<str> + ?Sized,
        F: Fn(&&Option<Literal>) -> bool + Clone,
    {
        for cfg_attribute in cfg_attributes_standalone_single_arg(attributes) {
            // We for sure have a `cfg` attribute with exactly one argument.
            // Thus, the unwraps are safe.
            let arg = cfg_attribute
                .args
                .as_ref()
                .unwrap()
                .inner
                .iter()
                .next()
                .unwrap();
            if arg.name.as_str() == arg_name.as_ref() && arg_val_predicate(&&arg.value) {
                return Some(cfg_attribute);
            }
        }

        None
    }

    /// Returns the first attribute in `attributes` that satisfies the `predicate`.
    pub(crate) fn attribute<'a, F>(
        attributes: __ref_type([[AttributeDecl]]),
        predicate: F,
    ) -> Option<__ref_type([Attribute])>
    where
        F: Fn(&__ref_type([Attribute])) -> bool + Clone + 'a,
    {
        attributes
            .__iter()
            .flat_map(|attr| attr.attribute.inner.__iter())
            .find(predicate)
    }

    /// Returns all arguments of an attribute that satisfy the `predicate`.
    pub(crate) fn attribute_args<'a, F>(
        attribute_args: __ref_type([Parens<Punctuated<AttributeArg, CommaToken>>]),
        predicate: F,
    ) -> impl Iterator<Item = __ref_type([AttributeArg])>
    where
        F: Fn(&__ref_type([AttributeArg])) -> bool + Clone + 'a,
    {
        attribute_args.inner.__iter().filter(predicate)
    }

    /// Returns the first argument of an attribute that satisfies the `predicate`, if any.
    pub(crate) fn attribute_arg<'a, F>(
        attribute_args: __ref_type([Parens<Punctuated<AttributeArg, CommaToken>>]),
        predicate: F,
    ) -> Option<__ref_type([AttributeArg])>
    where
        F: Fn(&__ref_type([AttributeArg])) -> bool + Clone + 'a,
    {
        attribute_args.inner.__iter().find(predicate)
    }

    /// Returns all annotated `impl` items (self- or trait-impls) in the `parent`.
    pub(crate) fn impl_self_or_trait_decls_annotated<'a, P>(
        parent: __ref_type([P]),
    ) -> impl Iterator<Item = __ref_type([Annotated<ItemKind>])>
    where
        P: __ElementsMatcher<Annotated<ItemKind>>,
    {
        parent.__match_elems(|annotated| matches!(annotated.value, ItemKind::Impl(_)))
    }

    /// Returns all trait constraints for all constrained generic
    /// arguments in the `parent`, that satisfy the `predicate`.
    /// The result is flattened and cumulative. This means that
    /// all the trait constraints will be collected from all
    /// the constraint arguments, even if there are duplicates.
    ///
    /// E.g., for this `where` clause and no predicate:
    /// ```ignore
    /// where A: Eq + AbiEncode + SomeTrait,
    ///       B: Eq + SomeTrait,
    /// ```
    /// The returned trait constraints will be:
    /// ```ignore
    /// Eq, AbiEncode, SomeTrait, Eq, SomeTrait
    /// ```
    pub(crate) fn trait_constraints<'a, P, F>(
        parent: __ref_type([P]),
        predicate: F,
    ) -> impl Iterator<Item = __ref_type([PathType])>
    where
        F: Fn(&__ref_type([PathType])) -> bool + Clone + 'a,
        P: __ElementsMatcher<PathType>,
    {
        parent.__match_elems(predicate)
    }

    /// Returns all functions in the `parent` that satisfy the `predicate`.
    pub(crate) fn functions<'a, P, F>(
        parent: __ref_type([P]),
        predicate: F,
    ) -> impl Iterator<Item = __ref_type([ItemFn])>
    where
        F: Fn(&__ref_type([ItemFn])) -> bool + Clone + 'a,
        P: __ElementsMatcher<ItemFn>,
    {
        parent.__match_elems(predicate)
    }
}

/// Ready-made predicates for use with the matchers above.
#[allow(dead_code)]
pub mod predicates {
    pub mod lexed_storage_field {
        use super::super::*;

        /// True if the storage field has an explicit key expression
        /// (declared with the `in` keyword).
        pub(crate) fn with_in_keyword(storage_field: &&StorageField) -> bool {
            storage_field.key_expr.is_some()
        }

        /// True if the storage field has no explicit key expression.
        pub(crate) fn without_in_keyword(storage_field: &&mut StorageField) -> bool {
            storage_field.key_expr.is_none()
        }
    }

    pub mod item_impl {
        use super::super::*;

        /// Returns a predicate that is true for trait impls whose
        /// trait name (last path segment) equals `trait_name`.
        /// Self-impls (no trait) never match.
        pub(crate) fn implements_trait<'a, N: AsRef<str> + ?Sized>(
            trait_name: &'a N,
        ) -> impl Fn(&&'a ItemImpl) -> bool {
            move |item_impl: &&ItemImpl| {
                if let Some((path, _for_token)) = &item_impl.trait_opt {
                    path.last_segment().name.as_str() == trait_name.as_ref()
                } else {
                    false
                }
            }
        }
    }

    pub mod literal {
        use sway_ast::literal::{LitBool, LitBoolType};

        use super::super::*;

        /// True if the literal is the boolean literal `true`.
        pub(crate) fn is_bool_true(literal: &Literal) -> bool {
            matches!(
                literal,
                Literal::Bool(LitBool {
                    kind: LitBoolType::True,
                    ..
                })
            )
        }

        /// True if the literal is the boolean literal `false`.
        pub(crate) fn is_bool_false(literal: &Literal) -> bool {
            matches!(
                literal,
                Literal::Bool(LitBool {
                    kind: LitBoolType::False,
                    ..
                })
            )
        }
    }
}
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false
FuelLabs/sway
https://github.com/FuelLabs/sway/blob/cc8f867043f3ec2c14ec4088d449cde603929a80/forc-plugins/forc-migrate/src/cli/mod.rs
forc-plugins/forc-migrate/src/cli/mod.rs
//! The command line interface for `forc migrate`. mod commands; mod shared; use anyhow::Result; use clap::{Parser, Subcommand}; use forc_tracing::{init_tracing_subscriber, LevelFilter, TracingSubscriberOptions}; use self::commands::{check, run, show}; use check::Command as CheckCommand; use run::Command as RunCommand; use show::Command as ShowCommand; fn help() -> &'static str { Box::leak( format!( "Examples:\n{}{}{}", show::examples(), check::examples(), run::examples(), ) .trim_end() .to_string() .into_boxed_str(), ) } /// Forc plugin for migrating Sway projects to the next breaking change version of Sway. #[derive(Debug, Parser)] #[clap( name = "forc-migrate", after_help = help(), version )] pub(crate) struct Opt { /// The command to run #[clap(subcommand)] command: ForcMigrate, } impl Opt { fn silent(&self) -> bool { match &self.command { ForcMigrate::Show(_) => true, ForcMigrate::Check(command) => command.check.silent, ForcMigrate::Run(command) => command.run.silent, } } } #[derive(Subcommand, Debug)] enum ForcMigrate { Show(ShowCommand), Check(CheckCommand), Run(RunCommand), } pub fn run_cli() -> Result<()> { let opt = Opt::parse(); let tracing_options = TracingSubscriberOptions { silent: Some(opt.silent()), log_level: Some(LevelFilter::INFO), ..Default::default() }; init_tracing_subscriber(tracing_options); match opt.command { ForcMigrate::Show(command) => show::exec(command), ForcMigrate::Check(command) => check::exec(command), ForcMigrate::Run(command) => run::exec(command), } }
rust
Apache-2.0
cc8f867043f3ec2c14ec4088d449cde603929a80
2026-01-04T15:31:58.694488Z
false